Dataset columns (name, dtype, value or length range):

    Unnamed: 0   int64            0 .. 2.93k
    code         string, lengths  101 .. 62.2k
    docs         string, lengths  51 .. 10.7k
    doc_len      int64            4 .. 1.74k
    words        int64            4 .. 4.82k
    lang         string           1 class
    prompt       string, lengths  320 .. 71.2k
2,600
```Python
def delete_project_summary_annotations_before_updating_annotation(sender, instance, **kwargs):
    try:
        old_annotation = sender.objects.get(id=instance.id)
    except Annotation.DoesNotExist:
        # annotation just created - do nothing
        return
    old_annotation.decrease_project_summary_counters()

    # update task counters if annotation changes its was_cancelled status
    task = instance.task
    if old_annotation.was_cancelled != instance.was_cancelled:
        if instance.was_cancelled:
            task.cancelled_annotations = task.cancelled_annotations + 1
            task.total_annotations = task.total_annotations - 1
        else:
            task.cancelled_annotations = task.cancelled_annotations - 1
            task.total_annotations = task.total_annotations + 1
        task.update_is_labeled()
        Task.objects.filter(id=instance.task.id).update(
            is_labeled=task.is_labeled,
            total_annotations=task.total_annotations,
            cancelled_annotations=task.cancelled_annotations,
        )


@receiver(post_save, sender=Annotation)
```
Before updating annotation fields - ensure previous info removed from project.summary
11
67
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def delete_project_summary_annotations_before_updating_annotation(sender, instance, **kwargs): try: old_annotation = sender.objects.get(id=instance.id) except Annotation.DoesNotExist: # annotation just created - do nothing return old_annotation.decrease_project_summary_counters() # update task counters if annotation changes it's was_cancelled status task = instance.task if old_annotation.was_cancelled != instance.was_cancelled: if instance.was_cancelled: task.cancelled_annotations = task.cancelled_annotations + 1 task.total_annotations = task.total_annotations - 1 else: task.cancelled_annotations = task.cancelled_annotations - 1 task.total_annotations = task.total_annotations + 1 task.update_is_labeled() Task.objects.filter(id=instance.task.id).update( is_labeled=task.is_labeled, total_annotations=task.total_annotations, cancelled_annotations=task.cancelled_annotations ) @receiver(post_save, sender=Annotation) ``` ###Assistant : Before updating annotation fields - ensure previous info removed from project.summary
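For context, here is a minimal sketch of how a handler like the one above is wired into Django's signal framework. The app label `"labels.Annotation"` and the duplicate-detection via `instance.pk` are illustrative assumptions, not the original project's code; it only runs inside a configured Django project.

```Python
from django.db.models.signals import pre_save
from django.dispatch import receiver

# "labels.Annotation" is a hypothetical app_label.ModelName; model signals
# accept this lazy string form so the model need not be imported here.
@receiver(pre_save, sender="labels.Annotation")
def before_annotation_update(sender, instance, **kwargs):
    if instance.pk is None:
        return  # instance is being created, nothing to roll back
    old = sender.objects.get(pk=instance.pk)
    # ... compare old vs. instance and adjust counters here ...
```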
2,601
```Python
def load_config(self, modelpath, use_gpu, gpu_id, use_mkldnn, cpu_threads):
    # configure the execution device
    if use_gpu:
        try:
            int(os.environ.get('CUDA_VISIBLE_DEVICES'))
        except Exception:
            print()
            use_gpu = False

    if os.path.isdir(modelpath):
        if os.path.exists(os.path.join(modelpath, "__params__")):
            # __model__ + __params__
            model = os.path.join(modelpath, "__model__")
            params = os.path.join(modelpath, "__params__")
            config = Config(model, params)
        elif os.path.exists(os.path.join(modelpath, "params")):
            # model + params
            model = os.path.join(modelpath, "model")
            params = os.path.join(modelpath, "params")
            config = Config(model, params)
        elif os.path.exists(os.path.join(modelpath, "__model__")):
            # __model__ + others
            config = Config(modelpath)
        else:
            raise Exception(
                "Error! Can't find the model in: %s. Please check your model path." % os.path.abspath(modelpath))
    elif os.path.exists(modelpath + ".pdmodel"):
        # *.pdmodel + *.pdiparams
        model = modelpath + ".pdmodel"
        params = modelpath + ".pdiparams"
        config = Config(model, params)
    elif isinstance(modelpath, Config):
        config = modelpath
    else:
        raise Exception(
            "Error! Can't find the model in: %s. Please check your model path." % os.path.abspath(modelpath))

    # set runtime parameters
    if use_gpu:
        config.enable_use_gpu(100, gpu_id)
    else:
        config.disable_gpu()
        config.set_cpu_math_library_num_threads(cpu_threads)
        if use_mkldnn:
            config.enable_mkldnn()
    config.disable_glog_info()

    # return the config
    return config

# predictor creation function
```
load the model config
modelpath: inference model path
use_gpu: use gpu or not
use_mkldnn: use mkldnn or not

Error! Unable to use GPU. Please set the environment variables "CUDA_VISIBLE_DEVICES=GPU_id" to use GPU. Now switch to CPU to continue...
38
151
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def load_config(self, modelpath, use_gpu, gpu_id, use_mkldnn, cpu_threads): # 对运行位置进行配置 if use_gpu: try: int(os.environ.get('CUDA_VISIBLE_DEVICES')) except Exception: print( ) use_gpu = False if os.path.isdir(modelpath): if os.path.exists(os.path.join(modelpath, "__params__")): # __model__ + __params__ model = os.path.join(modelpath, "__model__") params = os.path.join(modelpath, "__params__") config = Config(model, params) elif os.path.exists(os.path.join(modelpath, "params")): # model + params model = os.path.join(modelpath, "model") params = os.path.join(modelpath, "params") config = Config(model, params) elif os.path.exists(os.path.join(modelpath, "__model__")): # __model__ + others config = Config(modelpath) else: raise Exception( "Error! Can\'t find the model in: %s. Please check your model path." % os.path.abspath(modelpath)) elif os.path.exists(modelpath + ".pdmodel"): # *.pdmodel + *.pdiparams model = modelpath + ".pdmodel" params = modelpath + ".pdiparams" config = Config(model, params) elif isinstance(modelpath, Config): config = modelpath else: raise Exception( "Error! Can\'t find the model in: %s. Please check your model path." % os.path.abspath(modelpath)) # 设置参数 if use_gpu: config.enable_use_gpu(100, gpu_id) else: config.disable_gpu() config.set_cpu_math_library_num_threads(cpu_threads) if use_mkldnn: config.enable_mkldnn() config.disable_glog_info() # 返回配置 return config # 预测器创建函数 ``` ###Assistant : load the model config modelpath: inference model path use_gpu: use gpu or not use_mkldnn: use mkldnn or not Error! Unable to use GPU. Please set the environment variables "CUDA_VISIBLE_DEVICES=GPU_id" to use GPU. Now switch to CPU to continue...
2,602
```Python
def trim_line(line, column=0):
    line = line.strip("\n")
    ll = len(line)
    if ll <= 150:
        return line
    if column > ll:
        column = ll
    start = max(column - 60, 0)
    # Round down if it brings us close to the edge
    if start < 5:
        start = 0
    end = min(start + 140, ll)
    # Round up to the end if it's close
    if end > ll - 5:
        end = ll
    # If we are bumped all the way to the end,
    # make sure we still get a full 140 characters in the line
    if end == ll:
        start = max(end - 140, 0)
    line = line[start:end]
    if end < ll:
        # we've snipped from the end
        line += " {snip}"
    if start > 0:
        # we've snipped from the beginning
        line = "{snip} " + line
    return line
```
Trims a line down to a goal of 140 characters, with a little wiggle room to be sensible, and tries to trim around the given `column`: it extracts roughly 60 characters before and after the provided `column` to yield better context.
44
139
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def trim_line(line, column=0): line = line.strip("\n") ll = len(line) if ll <= 150: return line if column > ll: column = ll start = max(column - 60, 0) # Round down if it brings us close to the edge if start < 5: start = 0 end = min(start + 140, ll) # Round up to the end if it's close if end > ll - 5: end = ll # If we are bumped all the way to the end, # make sure we still get a full 140 characters in the line if end == ll: start = max(end - 140, 0) line = line[start:end] if end < ll: # we've snipped from the end line += " {snip}" if start > 0: # we've snipped from the beginning line = "{snip} " + line return line ``` ###Assistant : Trims a line down to a goal of 140 characters, with a little wiggle room to be sensible and tries to trim around the given `column`. So it tries to extract 60 characters before and after the provided `column` and yield a better context.
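A quick illustration of the snipping behavior, assuming the `trim_line` above is in scope:

```Python
long_line = "x" * 300

print(trim_line(long_line))
# 140 x's followed by " {snip}" - snipped from the end only

print(trim_line(long_line, column=200))
# "{snip} " + line[140:280] + " {snip}" - a window around column 200

print(trim_line("short line"))
# returned unchanged: lines of 150 characters or fewer are not touched
```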
2,603
```Python
def test_background_update_default_batch_set_by_config(self):
    self.get_success(
        self.store.db_pool.simple_insert(
            "background_updates",
            values={"update_name": "test_update", "progress_json": '{"my_key": 1}'},
        )
    )

    self.update_handler.side_effect = self.update
    self.update_handler.reset_mock()
    res = self.get_success(
        self.updates.do_next_background_update(False),
        by=0.01,
    )
    self.assertFalse(res)

    # on the first call, we should get run with the default background update size
    # specified in the config
    self.update_handler.assert_called_once_with({"my_key": 1}, 20)
```
Test that the background update is run with the default_batch_size set by the config
14
45
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_background_update_default_batch_set_by_config(self): self.get_success( self.store.db_pool.simple_insert( "background_updates", values={"update_name": "test_update", "progress_json": '{"my_key": 1}'}, ) ) self.update_handler.side_effect = self.update self.update_handler.reset_mock() res = self.get_success( self.updates.do_next_background_update(False), by=0.01, ) self.assertFalse(res) # on the first call, we should get run with the default background update size specified in the config self.update_handler.assert_called_once_with({"my_key": 1}, 20) ``` ###Assistant : Test that the background update is run with the default_batch_size set by the config
2,604
```Python
def _expiry_date(self, session_data):
    return session_data.get("_session_expiry") or (
        self._last_modification()
        + datetime.timedelta(seconds=self.get_session_cookie_age())
    )
```
Return the expiry time of the file storing the session's content.
11
11
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _expiry_date(self, session_data): return session_data.get("_session_expiry") or ( self._last_modification() + datetime.timedelta(seconds=self.get_session_cookie_age()) ) ``` ###Assistant : Return the expiry time of the file storing the session's content.
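The fallback arithmetic in isolation, with illustrative values (1209600 seconds is Django's default SESSION_COOKIE_AGE):

```Python
import datetime

last_modification = datetime.datetime(2024, 1, 1, 12, 0, 0)
cookie_age = 1209600  # two weeks, in seconds

# When no explicit "_session_expiry" is stored, the expiry defaults to the
# file's last modification time plus the cookie age.
print(last_modification + datetime.timedelta(seconds=cookie_age))
# 2024-01-15 12:00:00
```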
2,605
```Python
def __call__(self, *args, **kwargs):
    # For backward compatibility
    if self._in_target_context_manager:
        return self.current_processor(*args, **kwargs)

    images = kwargs.pop("images", None)
    text = kwargs.pop("text", None)
    if len(args) > 0:
        images = args[0]
        args = args[1:]

    if images is None and text is None:
        raise ValueError("You need to specify either an `images` or `text` input to process.")

    if images is not None:
        inputs = self.feature_extractor(images, *args, **kwargs)
    if text is not None:
        encodings = self.tokenizer(text, **kwargs)

    if text is None:
        return inputs
    elif images is None:
        return encodings
    else:
        inputs["labels"] = encodings["input_ids"]
        return inputs
```
When used in normal mode, this method forwards all its arguments to AutoFeatureExtractor's [`~AutoFeatureExtractor.__call__`] and returns its output. If used in the context [`~TrOCRProcessor.as_target_processor`] this method forwards all its arguments to TrOCRTokenizer's [`~TrOCRTokenizer.__call__`]. Please refer to the docstring of the above two methods for more information.
46
89
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def __call__(self, *args, **kwargs): # For backward compatibility if self._in_target_context_manager: return self.current_processor(*args, **kwargs) images = kwargs.pop("images", None) text = kwargs.pop("text", None) if len(args) > 0: images = args[0] args = args[1:] if images is None and text is None: raise ValueError("You need to specify either an `images` or `text` input to process.") if images is not None: inputs = self.feature_extractor(images, *args, **kwargs) if text is not None: encodings = self.tokenizer(text, **kwargs) if text is None: return inputs elif images is None: return encodings else: inputs["labels"] = encodings["input_ids"] return inputs ``` ###Assistant : When used in normal mode, this method forwards all its arguments to AutoFeatureExtractor's [`~AutoFeatureExtractor.__call__`] and returns its output. If used in the context [`~TrOCRProcessor.as_target_processor`] this method forwards all its arguments to TrOCRTokenizer's [`~TrOCRTokenizer.__call__`]. Please refer to the doctsring of the above two methods for more information.
2,606
```Python
def get_report(module_name, report_name):
    reports = get_reports()
    module = reports.get(module_name)

    if module is None:
        return None

    report = module.get(report_name)

    if report is None:
        return None

    return report
```
Return a specific report from within a module.
8
26
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_report(module_name, report_name): reports = get_reports() module = reports.get(module_name) if module is None: return None report = module.get(report_name) if report is None: return None return report ``` ###Assistant : Return a specific report from within a module.
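The function is a guarded two-level dictionary lookup. A self-contained sketch with a stubbed `get_reports()` (the real one comes from the surrounding module; the data here is illustrative):

```Python
def get_reports():
    # hypothetical registry: module name -> {report name -> report object}
    return {"devices": {"check_hostnames": "<report object>"}}

def get_report(module_name, report_name):
    module = get_reports().get(module_name)
    if module is None:
        return None
    return module.get(report_name)

print(get_report("devices", "check_hostnames"))  # <report object>
print(get_report("devices", "missing"))          # None
print(get_report("nope", "anything"))            # None
```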
2,607
```Python
def test_worker_duty_configs(self) -> None:
    worker1_config = self._make_worker_config(
        worker_app="synapse.app.generic_worker",
        worker_name="worker1",
        extras={
            "notify_appservices_from_worker": "worker2",
            "update_user_directory_from_worker": "worker1",
        },
    )
    self.assertFalse(worker1_config.should_notify_appservices)
    self.assertTrue(worker1_config.should_update_user_directory)

    worker2_config = self._make_worker_config(
        worker_app="synapse.app.generic_worker",
        worker_name="worker2",
        extras={
            "notify_appservices_from_worker": "worker2",
            "update_user_directory_from_worker": "worker1",
        },
    )
    self.assertTrue(worker2_config.should_notify_appservices)
    self.assertFalse(worker2_config.should_update_user_directory)
```
Additional tests for the worker duties
6
32
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_worker_duty_configs(self) -> None: worker1_config = self._make_worker_config( worker_app="synapse.app.generic_worker", worker_name="worker1", extras={ "notify_appservices_from_worker": "worker2", "update_user_directory_from_worker": "worker1", }, ) self.assertFalse(worker1_config.should_notify_appservices) self.assertTrue(worker1_config.should_update_user_directory) worker2_config = self._make_worker_config( worker_app="synapse.app.generic_worker", worker_name="worker2", extras={ "notify_appservices_from_worker": "worker2", "update_user_directory_from_worker": "worker1", }, ) self.assertTrue(worker2_config.should_notify_appservices) self.assertFalse(worker2_config.should_update_user_directory) ``` ###Assistant : Additional tests for the worker duties
2,608
```Python
def actor_id(self):
    # only worker mode has actor_id
    assert (
        self.worker.mode == ray.worker.WORKER_MODE
    ), f"This method is only available when the process is a\
        worker. Current mode: {self.worker.mode}"
    actor_id = self.worker.actor_id
    return actor_id if not actor_id.is_nil() else None
```
Get the current actor ID in this worker: the ID of the actor of the current process. This shouldn't be used in a driver process.

Returns:
    The current actor ID in this worker. None if there's no actor ID.
38
38
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def actor_id(self): # only worker mode has actor_id assert ( self.worker.mode == ray.worker.WORKER_MODE ), f"This method is only available when the process is a\ worker. Current mode: {self.worker.mode}" actor_id = self.worker.actor_id return actor_id if not actor_id.is_nil() else None ``` ###Assistant : Get the current actor ID in this worker. ID of the actor of the current process. This shouldn't be used in a driver process. Returns: The current actor id in this worker. None if there's no actor id.
2,609
```Python
def test_redirect_to_current(self):
    start_url = reverse("wagtailsettings:edit", args=["tests", "testsetting"])
    dest_url = reverse(
        "wagtailsettings:edit", args=["tests", "testsetting", self.other_site.pk]
    )
    response = self.client.get(
        start_url, follow=True, HTTP_HOST=self.other_site.hostname
    )
    self.assertRedirects(
        response, dest_url, status_code=302, fetch_redirect_response=False
    )
```
Should redirect to the setting for the current site taken from the URL, by default
15
28
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_redirect_to_current(self): start_url = reverse("wagtailsettings:edit", args=["tests", "testsetting"]) dest_url = reverse( "wagtailsettings:edit", args=["tests", "testsetting", self.other_site.pk] ) response = self.client.get( start_url, follow=True, HTTP_HOST=self.other_site.hostname ) self.assertRedirects( response, dest_url, status_code=302, fetch_redirect_response=False ) ``` ###Assistant : Should redirect to the setting for the current site taken from the URL, by default
2,610
```Python
def fit(self, X, y, sample_weight=None, fit_params=None):
    if not hasattr(self.estimator, "fit"):
        raise ValueError("The base estimator should implement a fit method")

    y = self._validate_data(X="no_validation", y=y, multi_output=True)

    if is_classifier(self):
        check_classification_targets(y)

    if y.ndim == 1:
        raise ValueError(
            "y must have at least two dimensions for "
            "multi-output regression but has only one."
        )

    if sample_weight is not None and not has_fit_parameter(
        self.estimator, "sample_weight"
    ):
        raise ValueError("Underlying estimator does not support sample weights.")

    if not fit_params:
        fit_params = [None] * y.shape[1]

    self.estimators_ = Parallel(n_jobs=self.n_jobs)(
        delayed(_fit_estimator)(
            self.estimator, X, y[:, i], sample_weight, **fit_params[i]
        )
        for i in range(y.shape[1])
    )

    self.classes_ = []
    for estimator in self.estimators_:
        self.classes_.extend(estimator.classes_)

    if hasattr(self.estimators_[0], "n_features_in_"):
        self.n_features_in_ = self.estimators_[0].n_features_in_
    if hasattr(self.estimators_[0], "feature_names_in_"):
        self.feature_names_in_ = self.estimators_[0].feature_names_in_

    return self
```
Fit the model to data, separately for each output variable.

Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
    The input data.
y : {array-like, sparse matrix} of shape (n_samples, n_outputs)
    Multi-output targets. An indicator matrix turns on multilabel estimation.
sample_weight : array-like of shape (n_samples,), default=None
    Sample weights. If `None`, then samples are equally weighted. Only supported if the underlying classifier supports sample weights.
fit_params : A list of dicts for the fit_params
    Parameters passed to the ``estimator.fit`` method of each step. Each dict may contain same or different values (e.g. different eval_sets or init_models)

    .. versionadded:: 0.23

Returns
-------
self : object
    Returns a fitted instance.
110
114
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def fit(self, X, y, sample_weight=None, fit_params=None): if not hasattr(self.estimator, "fit"): raise ValueError("The base estimator should implement a fit method") y = self._validate_data(X="no_validation", y=y, multi_output=True) if is_classifier(self): check_classification_targets(y) if y.ndim == 1: raise ValueError( "y must have at least two dimensions for " "multi-output regression but has only one." ) if sample_weight is not None and not has_fit_parameter( self.estimator, "sample_weight" ): raise ValueError("Underlying estimator does not support sample weights.") if not fit_params: fit_params = [None] * y.shape[1] self.estimators_ = Parallel(n_jobs=self.n_jobs)( delayed(_fit_estimator)( self.estimator, X, y[:, i], sample_weight, **fit_params[i] ) for i in range(y.shape[1]) ) self.classes_ = [] for estimator in self.estimators_: self.classes_.extend(estimator.classes_) if hasattr(self.estimators_[0], "n_features_in_"): self.n_features_in_ = self.estimators_[0].n_features_in_ if hasattr(self.estimators_[0], "feature_names_in_"): self.feature_names_in_ = self.estimators_[0].feature_names_in_ return self ``` ###Assistant : Fit the model to data, separately for each output variable. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input data. y : {array-like, sparse matrix} of shape (n_samples, n_outputs) Multi-output targets. An indicator matrix turns on multilabel estimation. sample_weight : array-like of shape (n_samples,), default=None Sample weights. If `None`, then samples are equally weighted. Only supported if the underlying classifier supports sample weights. fit_params : A list of dicts for the fit_params Parameters passed to the ``estimator.fit`` method of each step. Each dict may contain same or different values (e.g. different eval_sets or init_models) .. versionadded:: 0.23 Returns ------- self : object Returns a fitted instance.
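For orientation, typical usage of the per-output fitting pattern, shown with scikit-learn's stock `MultiOutputRegressor` (the snippet above is a variant that additionally accepts a per-output `fit_params` list; the data below is synthetic):

```Python
import numpy as np
from sklearn.linear_model import Ridge
from sklearn.multioutput import MultiOutputRegressor

X = np.random.rand(100, 4)
Y = np.random.rand(100, 3)  # three output columns -> three fitted estimators

# One clone of the base estimator is fitted per output column, in parallel.
model = MultiOutputRegressor(Ridge()).fit(X, Y)
print(len(model.estimators_))   # 3
print(model.predict(X).shape)   # (100, 3)
```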
2,611
```Python
def inner_choices(self) -> Iterable['ValueChoice']:
    for arg in self.arguments:
        if isinstance(arg, ValueChoiceX):
            yield from arg.inner_choices()
```
Return an iterable of all leaf value choices. Useful for composition of value choices. No deduplication on labels. Mutators should take care.
22
14
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def inner_choices(self) -> Iterable['ValueChoice']: for arg in self.arguments: if isinstance(arg, ValueChoiceX): yield from arg.inner_choices() ``` ###Assistant : Return an iterable of all leaf value choices. Useful for composition of value choices. No deduplication on labels. Mutators should take care.
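The recursive `yield from` pattern used above, in a self-contained form: a generic leaf flattener over nested lists rather than the original value-choice classes.

```Python
def leaves(node):
    # Recurse into containers; yield everything else as a leaf.
    if isinstance(node, list):
        for child in node:
            yield from leaves(child)
    else:
        yield node

print(list(leaves([1, [2, [3, 4]], 5])))  # [1, 2, 3, 4, 5]
```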
2,612
```Python
def get_indices(expr):
    # We call ourself recursively to determine indices of sub expressions.

    # break recursion
    if isinstance(expr, Indexed):
        c = expr.indices
        inds, dummies = _remove_repeated(c)
        return inds, {}
    elif expr is None:
        return set(), {}
    elif isinstance(expr, Idx):
        return {expr}, {}
    elif expr.is_Atom:
        return set(), {}

    # recurse via specialized functions
    else:
        if expr.is_Mul:
            return _get_indices_Mul(expr)
        elif expr.is_Add:
            return _get_indices_Add(expr)
        elif expr.is_Pow or isinstance(expr, exp):
            return _get_indices_Pow(expr)
        elif isinstance(expr, Piecewise):
            # FIXME: No support for Piecewise yet
            return set(), {}
        elif isinstance(expr, Function):
            # Support ufunc like behaviour by returning indices from arguments.
            # Functions do not interpret repeated indices across arguments
            # as summation
            ind0 = set()
            for arg in expr.args:
                ind, sym = get_indices(arg)
                ind0 |= ind
            return ind0, sym
        # this test is expensive, so it should be at the end
        elif not expr.has(Indexed):
            return set(), {}
        raise NotImplementedError(
            "FIXME: No specialized handling of type %s" % type(expr))
```
Determine the outer indices of expression ``expr``

By *outer* we mean indices that are not summation indices. Returns a set and a dict. The set contains outer indices and the dict contains information about index symmetries.

Examples
========

>>> from sympy.tensor.index_methods import get_indices
>>> from sympy import symbols
>>> from sympy.tensor import IndexedBase
>>> x, y, A = map(IndexedBase, ['x', 'y', 'A'])
>>> i, j, a, z = symbols('i j a z', integer=True)

The indices of the total expression are determined. Repeated indices imply a summation, for instance the trace of a matrix A:

>>> get_indices(A[i, i])
(set(), {})

In the case of many terms, the terms are required to have identical outer indices. Else an IndexConformanceException is raised.

>>> get_indices(x[i] + A[i, j]*y[j])
({i}, {})

:Exceptions:

An IndexConformanceException means that the terms are not compatible, e.g.

>>> get_indices(x[i] + y[j])                #doctest: +SKIP
        (...)
IndexConformanceException: Indices are not consistent: x(i) + y(j)

.. warning::
   The concept of *outer* indices applies recursively, starting on the deepest level. This implies that dummies inside parentheses are assumed to be summed first, so that the following expression is handled gracefully:

>>> get_indices((x[i] + A[i, j]*y[j])*x[j])
({i, j}, {})

This is correct and may appear convenient, but you need to be careful with this as SymPy will happily .expand() the product, if requested. The resulting expression would mix the outer ``j`` with the dummies inside the parentheses, which makes it a different expression. To be on the safe side, it is best to avoid such ambiguities by using unique indices for all contractions that should be held separate.
263
152
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_indices(expr): # We call ourself recursively to determine indices of sub expressions. # break recursion if isinstance(expr, Indexed): c = expr.indices inds, dummies = _remove_repeated(c) return inds, {} elif expr is None: return set(), {} elif isinstance(expr, Idx): return {expr}, {} elif expr.is_Atom: return set(), {} # recurse via specialized functions else: if expr.is_Mul: return _get_indices_Mul(expr) elif expr.is_Add: return _get_indices_Add(expr) elif expr.is_Pow or isinstance(expr, exp): return _get_indices_Pow(expr) elif isinstance(expr, Piecewise): # FIXME: No support for Piecewise yet return set(), {} elif isinstance(expr, Function): # Support ufunc like behaviour by returning indices from arguments. # Functions do not interpret repeated indices across arguments # as summation ind0 = set() for arg in expr.args: ind, sym = get_indices(arg) ind0 |= ind return ind0, sym # this test is expensive, so it should be at the end elif not expr.has(Indexed): return set(), {} raise NotImplementedError( "FIXME: No specialized handling of type %s" % type(expr)) ``` ###Assistant : Determine the outer indices of expression ``expr`` By *outer* we mean indices that are not summation indices. Returns a set and a dict. The set contains outer indices and the dict contains information about index symmetries. Examples ======== >>> from sympy.tensor.index_methods import get_indices >>> from sympy import symbols >>> from sympy.tensor import IndexedBase >>> x, y, A = map(IndexedBase, ['x', 'y', 'A']) >>> i, j, a, z = symbols('i j a z', integer=True) The indices of the total expression is determined, Repeated indices imply a summation, for instance the trace of a matrix A: >>> get_indices(A[i, i]) (set(), {}) In the case of many terms, the terms are required to have identical outer indices. Else an IndexConformanceException is raised. >>> get_indices(x[i] + A[i, j]*y[j]) ({i}, {}) :Exceptions: An IndexConformanceException means that the terms ar not compatible, e.g. >>> get_indices(x[i] + y[j]) #doctest: +SKIP (...) IndexConformanceException: Indices are not consistent: x(i) + y(j) .. warning:: The concept of *outer* indices applies recursively, starting on the deepest level. This implies that dummies inside parenthesis are assumed to be summed first, so that the following expression is handled gracefully: >>> get_indices((x[i] + A[i, j]*y[j])*x[j]) ({i, j}, {}) This is correct and may appear convenient, but you need to be careful with this as SymPy will happily .expand() the product, if requested. The resulting expression would mix the outer ``j`` with the dummies inside the parenthesis, which makes it a different expression. To be on the safe side, it is best to avoid such ambiguities by using unique indices for all contractions that should be held separate.
2,613
```Python
def dist_location(dist):
    # type: (Distribution) -> str
    egg_link = egg_link_path(dist)
    if egg_link:
        return normalize_path(egg_link)
    return normalize_path(dist.location)
```
Get the site-packages location of this distribution. Generally this is dist.location, except in the case of develop-installed packages, where dist.location is the source code location, and we want to know where the egg-link file is. The returned location is normalized (in particular, with symlinks removed).
45
16
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def dist_location(dist): # type: (Distribution) -> str egg_link = egg_link_path(dist) if egg_link: return normalize_path(egg_link) return normalize_path(dist.location) ``` ###Assistant : Get the site-packages location of this distribution. Generally this is dist.location, except in the case of develop-installed packages, where dist.location is the source code location, and we want to know where the egg-link file is. The returned location is normalized (in particular, with symlinks removed).
2,614
```Python
def can_connect(url, error_classes=None):
    if error_classes is None:
        error_classes = _get_default_network_errors()

    try:
        with urlopen(url, timeout=20) as response:
            # Timeout just in case rate-limiting is applied
            if response.status != 200:
                return False
    except error_classes:
        return False
    else:
        return True


# ------------------------------------------------------------------
# File-IO
```
Try to connect to the given url. True if it succeeds, False if an OSError is raised.

Parameters
----------
url : basestring
    The URL to try to connect to

Returns
-------
connectable : bool
    Return True if no OSError (unable to connect) or URLError (bad url) was raised
45
41
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def can_connect(url, error_classes=None): if error_classes is None: error_classes = _get_default_network_errors() try: with urlopen(url, timeout=20) as response: # Timeout just in case rate-limiting is applied if response.status != 200: return False except error_classes: return False else: return True # ------------------------------------------------------------------ # File-IO ``` ###Assistant : Try to connect to the given url. True if succeeds, False if OSError raised Parameters ---------- url : basestring The URL to try to connect to Returns ------- connectable : bool Return True if no OSError (unable to connect) or URLError (bad url) was raised
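A self-contained sketch of the same probe. The original pulls its error classes and `urlopen` from the surrounding test helpers, so the standard library and a plain `OSError` stand in here:

```Python
from urllib.request import urlopen

def can_connect(url, timeout=20):
    # Return True only when the URL answers with HTTP 200 within the timeout.
    try:
        with urlopen(url, timeout=timeout) as response:
            return response.status == 200
    except OSError:  # URLError is a subclass of OSError
        return False

# can_connect("https://example.com")  -> True when the network is available
```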
2,615
```Python
def test_valid_slack_channel_id(self):
    integration = Integration.objects.create(
        external_id="1",
        provider="slack",
        metadata={"access_token": "xoxp-xxxxxxxxx-xxxxxxxxxx-xxxxxxxxxxxx"},
    )
    integration.add_organization(self.organization, self.user)
    base_params = self.valid_params.copy()
    base_params.update(
        {
            "type": AlertRuleTriggerAction.get_registered_type(
                AlertRuleTriggerAction.Type.SLACK
            ).slug,
            "targetType": ACTION_TARGET_TYPE_TO_STRING[
                AlertRuleTriggerAction.TargetType.SPECIFIC
            ],
            "targetIdentifier": "merp",
            "integration": str(integration.id),
        }
    )
    context = self.context.copy()
    context.update({"input_channel_id": "CSVK0921"})
    responses.add(
        method=responses.GET,
        url="https://slack.com/api/conversations.info",
        status=200,
        content_type="application/json",
        body=json.dumps({"ok": "true", "channel": {"name": "merp", "id": "CSVK0921"}}),
    )
    serializer = AlertRuleTriggerActionSerializer(context=context, data=base_params)

    assert serializer.is_valid()
    serializer.save()

    # Make sure the action was created.
    alert_rule_trigger_actions = list(
        AlertRuleTriggerAction.objects.filter(integration=integration)
    )
    assert len(alert_rule_trigger_actions) == 1
```
Test that when a valid Slack channel ID is provided, we look up the channel name and validate it against the targetIdentifier.
22
73
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_valid_slack_channel_id(self): integration = Integration.objects.create( external_id="1", provider="slack", metadata={"access_token": "xoxp-xxxxxxxxx-xxxxxxxxxx-xxxxxxxxxxxx"}, ) integration.add_organization(self.organization, self.user) base_params = self.valid_params.copy() base_params.update( { "type": AlertRuleTriggerAction.get_registered_type( AlertRuleTriggerAction.Type.SLACK ).slug, "targetType": ACTION_TARGET_TYPE_TO_STRING[ AlertRuleTriggerAction.TargetType.SPECIFIC ], "targetIdentifier": "merp", "integration": str(integration.id), } ) context = self.context.copy() context.update({"input_channel_id": "CSVK0921"}) responses.add( method=responses.GET, url="https://slack.com/api/conversations.info", status=200, content_type="application/json", body=json.dumps({"ok": "true", "channel": {"name": "merp", "id": "CSVK0921"}}), ) serializer = AlertRuleTriggerActionSerializer(context=context, data=base_params) assert serializer.is_valid() serializer.save() # # Make sure the action was created. alert_rule_trigger_actions = list( AlertRuleTriggerAction.objects.filter(integration=integration) ) assert len(alert_rule_trigger_actions) == 1 ``` ###Assistant : Test that when a valid Slack channel ID is provided, we look up the channel name and validate it against the targetIdentifier.
2,616
```Python
def get_legacy(members):
    if AIX_ABI == 64:
        # AIX 64-bit member is one of shr64.o, shr_64.o, or shr4_64.o
        expr = r'shr4?_?64\.o'
        member = get_one_match(expr, members)
        if member:
            return member
    else:
        # 32-bit legacy names - both shr.o and shr4.o exist.
        # shr.o is the preferred name so we look for shr.o first
        # i.e., shr4.o is returned only when shr.o does not exist
        for name in ['shr.o', 'shr4.o']:
            member = get_one_match(re.escape(name), members)
            if member:
                return member
    return None
```
This routine provides the historical, aka legacy, naming schemes started in AIX4 shared library support for library member names. E.g., in /usr/lib/libc.a the member name is shr.o for the 32-bit binary and shr_64.o for the 64-bit binary.
33
77
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_legacy(members): if AIX_ABI == 64: # AIX 64-bit member is one of shr64.o, shr_64.o, or shr4_64.o expr = r'shr4?_?64\.o' member = get_one_match(expr, members) if member: return member else: # 32-bit legacy names - both shr.o and shr4.o exist. # shr.o is the preferred name so we look for shr.o first # i.e., shr4.o is returned only when shr.o does not exist for name in ['shr.o', 'shr4.o']: member = get_one_match(re.escape(name), members) if member: return member return None ``` ###Assistant : This routine provides historical aka legacy naming schemes started in AIX4 shared library support for library members names. e.g., in /usr/lib/libc.a the member name shr.o for 32-bit binary and shr_64.o for 64-bit binary.
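What the 64-bit member regex accepts, checked directly (the names are illustrative):

```Python
import re

expr = r'shr4?_?64\.o'
for name in ["shr64.o", "shr_64.o", "shr4_64.o", "shr.o"]:
    print(name, bool(re.fullmatch(expr, name)))
# shr64.o True, shr_64.o True, shr4_64.o True, shr.o False
```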
2,617
```Python
def set_policy(name, table="filter", family="ipv4", **kwargs):
    ret = {"name": name, "changes": {}, "result": None, "comment": ""}

    for ignore in _STATE_INTERNAL_KEYWORDS:
        if ignore in kwargs:
            del kwargs[ignore]

    if (
        __salt__["iptables.get_policy"](table, kwargs["chain"], family)
        == kwargs["policy"]
    ):
        ret["result"] = True
        ret[
            "comment"
        ] = "iptables default policy for chain {} on table {} for {} already set to {}".format(
            kwargs["chain"], table, family, kwargs["policy"]
        )
        return ret
    if __opts__["test"]:
        ret["comment"] = (
            "iptables default policy for chain {} on table {} for {} needs to be set"
            " to {}".format(kwargs["chain"], table, family, kwargs["policy"])
        )
        return ret
    if not __salt__["iptables.set_policy"](
        table, kwargs["chain"], kwargs["policy"], family
    ):
        ret["changes"] = {"locale": name}
        ret["result"] = True
        ret["comment"] = "Set default policy for {} to {} family {}".format(
            kwargs["chain"], kwargs["policy"], family
        )
        if "save" in kwargs and kwargs["save"]:
            if kwargs["save"] is not True:
                filename = kwargs["save"]
            else:
                filename = None
            __salt__["iptables.save"](filename=filename, family=family)
            ret[
                "comment"
            ] = "Set and saved default policy for {} to {} family {}".format(
                kwargs["chain"], kwargs["policy"], family
            )
        return ret
    else:
        ret["result"] = False
        ret["comment"] = "Failed to set iptables default policy"
        return ret
```
.. versionadded:: 2014.1.0

Sets the default policy for iptables firewall tables

table
    The table that owns the chain that should be modified

family
    Networking family, either ipv4 or ipv6

policy
    The requested table policy

save
    If set to a true value, the new iptables rules for the given family will be saved to a file. See the ``append`` state for more details.
62
176
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def set_policy(name, table="filter", family="ipv4", **kwargs): ret = {"name": name, "changes": {}, "result": None, "comment": ""} for ignore in _STATE_INTERNAL_KEYWORDS: if ignore in kwargs: del kwargs[ignore] if ( __salt__["iptables.get_policy"](table, kwargs["chain"], family) == kwargs["policy"] ): ret["result"] = True ret[ "comment" ] = "iptables default policy for chain {} on table {} for {} already set to {}".format( kwargs["chain"], table, family, kwargs["policy"] ) return ret if __opts__["test"]: ret["comment"] = ( "iptables default policy for chain {} on table {} for {} needs to be set" " to {}".format(kwargs["chain"], table, family, kwargs["policy"]) ) return ret if not __salt__["iptables.set_policy"]( table, kwargs["chain"], kwargs["policy"], family ): ret["changes"] = {"locale": name} ret["result"] = True ret["comment"] = "Set default policy for {} to {} family {}".format( kwargs["chain"], kwargs["policy"], family ) if "save" in kwargs and kwargs["save"]: if kwargs["save"] is not True: filename = kwargs["save"] else: filename = None __salt__["iptables.save"](filename=filename, family=family) ret[ "comment" ] = "Set and saved default policy for {} to {} family {}".format( kwargs["chain"], kwargs["policy"], family ) return ret else: ret["result"] = False ret["comment"] = "Failed to set iptables default policy" return ret ``` ###Assistant : .. versionadded:: 2014.1.0 Sets the default policy for iptables firewall tables table The table that owns the chain that should be modified family Networking family, either ipv4 or ipv6 policy The requested table policy save If set to a true value, the new iptables rules for the given family will be saved to a file. See the ``append`` state for more details.
2,618
```Python
def _after_start(self):
    delay = self.request.config.getoption('--qute-delay-start')
    if delay:
        with self.disable_capturing():
            print(f"- waiting {delay}ms for quteprocess "
                  f"(PID: {self.proc.processId()})...")
        time.sleep(delay / 1000)
```
Wait before continuing if requested, e.g. for debugger attachment.
9
20
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _after_start(self): delay = self.request.config.getoption('--qute-delay-start') if delay: with self.disable_capturing(): print(f"- waiting {delay}ms for quteprocess " f"(PID: {self.proc.processId()})...") time.sleep(delay / 1000) ``` ###Assistant : Wait before continuing if requested, e.g. for debugger attachment.
2,619
```Python
def list_secrets(path, default=None):
    if default is None:
        default = CommandExecutionError
    log.debug("Listing vault secret keys for %s in %s", __grains__["id"], path)
    version2 = __utils__["vault.is_v2"](path)
    if version2["v2"]:
        path = version2["metadata"]
    try:
        url = "v1/{}".format(path)
        response = __utils__["vault.make_request"]("LIST", url)
        if response.status_code != 200:
            response.raise_for_status()
        return response.json()["data"]
    except Exception as err:  # pylint: disable=broad-except
        if default is CommandExecutionError:
            raise CommandExecutionError(
                "Failed to list secrets! {}: {}".format(type(err).__name__, err)
            )
        return default
```
.. versionchanged:: 3001
    The ``default`` argument has been added. When the path or path/key combination is not found, an exception will be raised, unless a default is provided.

List secret keys at the path in vault. The vault policy used must allow this. The path should end with a trailing slash.

CLI Example:

.. code-block:: bash

    salt '*' vault.list_secrets "secret/my/"
60
66
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def list_secrets(path, default=None): if default is None: default = CommandExecutionError log.debug("Listing vault secret keys for %s in %s", __grains__["id"], path) version2 = __utils__["vault.is_v2"](path) if version2["v2"]: path = version2["metadata"] try: url = "v1/{}".format(path) response = __utils__["vault.make_request"]("LIST", url) if response.status_code != 200: response.raise_for_status() return response.json()["data"] except Exception as err: # pylint: disable=broad-except if default is CommandExecutionError: raise CommandExecutionError( "Failed to list secrets! {}: {}".format(type(err).__name__, err) ) return default ``` ###Assistant : .. versionchanged:: 3001 The ``default`` argument has been added. When the path or path/key combination is not found, an exception will be raised, unless a default is provided. List secret keys at the path in vault. The vault policy used must allow this. The path should end with a trailing slash. CLI Example: .. code-block:: bash salt '*' vault.list_secrets "secret/my/"
2,620
```Python
def test_custom_kwargs_sharded(tmpdir, cls):
    strategy = cls(reduce_fp16=True)
    strategy.model = Mock(spec=LightningModule)
    strategy.model.trainer = Mock()
    class_name = "sharded" if isinstance(strategy, DDPShardedStrategy) else "sharded_spawn"

    with mock.patch(f"pytorch_lightning.strategies.{class_name}.ShardedDataParallel", autospec=True) as mock_sharded:
        strategy.configure_ddp()
    args, kwargs = mock_sharded.call_args
    assert "reduce_fp16" in kwargs
    assert kwargs["reduce_fp16"]


@RunIf(skip_windows=True, fairscale=True)
@mock.patch("pytorch_lightning.strategies.DDPShardedStrategy._wrap_optimizers", autospec=True)
@pytest.mark.parametrize(["params", "expected_buffer_size"], [(dict(), 0), (dict(reduce_buffer_size=128), 128)])
@pytest.mark.parametrize("num_nodes", [1, 2])
```
Tests to ensure that if custom kwargs are passed, they are set correctly.
13
49
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_custom_kwargs_sharded(tmpdir, cls): strategy = cls(reduce_fp16=True) strategy.model = Mock(spec=LightningModule) strategy.model.trainer = Mock() class_name = "sharded" if isinstance(strategy, DDPShardedStrategy) else "sharded_spawn" with mock.patch(f"pytorch_lightning.strategies.{class_name}.ShardedDataParallel", autospec=True) as mock_sharded: strategy.configure_ddp() args, kwargs = mock_sharded.call_args assert "reduce_fp16" in kwargs assert kwargs["reduce_fp16"] @RunIf(skip_windows=True, fairscale=True) @mock.patch("pytorch_lightning.strategies.DDPShardedStrategy._wrap_optimizers", autospec=True) @pytest.mark.parametrize(["params", "expected_buffer_size"], [(dict(), 0), (dict(reduce_buffer_size=128), 128)]) @pytest.mark.parametrize("num_nodes", [1, 2]) ``` ###Assistant : Tests to ensure that if custom kwargs are passed, they are set correctly.
2,621
```Python
def versions_from_parentdir(parentdir_prefix, root, verbose):
    rootdirs = []
    for _ in range(3):
        dirname = os.path.basename(root)
        if dirname.startswith(parentdir_prefix):
            return {"version": dirname[len(parentdir_prefix):],
                    "full-revisionid": None,
                    "dirty": False, "error": None, "date": None}
        rootdirs.append(root)
        root = os.path.dirname(root)  # up a level

    if verbose:
        print("Tried directories %s but none started with prefix %s" %
              (str(rootdirs), parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")


SHORT_VERSION_PY = %s
```
Try to determine the version from the parent directory name.

Source tarballs conventionally unpack into a directory that includes both the project name and a version string. We will also support searching up two directory levels for an appropriately named parent directory

# This file was generated by 'versioneer.py' (0.21) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.

import json

version_json =
# END VERSION_JSON
84
58
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def versions_from_parentdir(parentdir_prefix, root, verbose): rootdirs = [] for _ in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): return {"version": dirname[len(parentdir_prefix):], "full-revisionid": None, "dirty": False, "error": None, "date": None} rootdirs.append(root) root = os.path.dirname(root) # up a level if verbose: print("Tried directories %s but none started with prefix %s" % (str(rootdirs), parentdir_prefix)) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") SHORT_VERSION_PY = %s ``` ###Assistant : Try to determine the version from the parent directory name. Source tarballs conventionally unpack into a directory that includes both the project name and a version string. We will also support searching up two directory levels for an appropriately named parent directory # This file was generated by 'versioneer.py' (0.21) from # revision-control system data, or from the parent directory name of an # unpacked source archive. Distribution tarballs contain a pre-generated copy # of this file. import json version_json = # END VERSION_JSON
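The core idea in isolation: strip a known prefix from a directory name to recover the version string (the paths and prefix are illustrative):

```Python
import os

parentdir_prefix = "myproject-"
root = "/tmp/build/myproject-1.2.3"

dirname = os.path.basename(root)      # "myproject-1.2.3"
if dirname.startswith(parentdir_prefix):
    print(dirname[len(parentdir_prefix):])  # 1.2.3
else:
    print(os.path.dirname(root))      # walk up a level and try again
```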
2,622
```Python
def response_add(self, request, obj, post_url_continue=None):
    opts = obj._meta
    preserved_filters = self.get_preserved_filters(request)
    obj_url = reverse(
        "admin:%s_%s_change" % (opts.app_label, opts.model_name),
        args=(quote(obj.pk),),
        current_app=self.admin_site.name,
    )
    # Add a link to the object's change form if the user can edit the obj.
    if self.has_change_permission(request, obj):
        obj_repr = format_html('<a href="{}">{}</a>', urlquote(obj_url), obj)
    else:
        obj_repr = str(obj)
    msg_dict = {
        "name": opts.verbose_name,
        "obj": obj_repr,
    }
    # Here, we distinguish between different save types by checking for
    # the presence of keys in request.POST.

    if IS_POPUP_VAR in request.POST:
        to_field = request.POST.get(TO_FIELD_VAR)
        if to_field:
            attr = str(to_field)
        else:
            attr = obj._meta.pk.attname
        value = obj.serializable_value(attr)
        popup_response_data = json.dumps(
            {
                "value": str(value),
                "obj": str(obj),
            }
        )
        return TemplateResponse(
            request,
            self.popup_response_template
            or [
                "admin/%s/%s/popup_response.html" % (opts.app_label, opts.model_name),
                "admin/%s/popup_response.html" % opts.app_label,
                "admin/popup_response.html",
            ],
            {
                "popup_response_data": popup_response_data,
            },
        )

    elif "_continue" in request.POST or (
        # Redirecting after "Save as new".
        "_saveasnew" in request.POST
        and self.save_as_continue
        and self.has_change_permission(request, obj)
    ):
        msg = _("The {name} “{obj}” was added successfully.")
        if self.has_change_permission(request, obj):
            msg += " " + _("You may edit it again below.")
        self.message_user(request, format_html(msg, **msg_dict), messages.SUCCESS)
        if post_url_continue is None:
            post_url_continue = obj_url
        post_url_continue = add_preserved_filters(
            {"preserved_filters": preserved_filters, "opts": opts},
            post_url_continue,
        )
        return HttpResponseRedirect(post_url_continue)

    elif "_addanother" in request.POST:
        msg = format_html(
            _(
                "The {name} “{obj}” was added successfully. You may add another {name} below."
            ),
            **msg_dict,
        )
        self.message_user(request, msg, messages.SUCCESS)
        redirect_url = request.path
        redirect_url = add_preserved_filters(
            {"preserved_filters": preserved_filters, "opts": opts}, redirect_url
        )
        return HttpResponseRedirect(redirect_url)

    else:
        msg = format_html(
            _("The {name} “{obj}” was added successfully."), **msg_dict
        )
        self.message_user(request, msg, messages.SUCCESS)
        return self.response_post_save_add(request, obj)
```
Determine the HttpResponse for the add_view stage.
7
248
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def response_add(self, request, obj, post_url_continue=None): opts = obj._meta preserved_filters = self.get_preserved_filters(request) obj_url = reverse( "admin:%s_%s_change" % (opts.app_label, opts.model_name), args=(quote(obj.pk),), current_app=self.admin_site.name, ) # Add a link to the object's change form if the user can edit the obj. if self.has_change_permission(request, obj): obj_repr = format_html('<a href="{}">{}</a>', urlquote(obj_url), obj) else: obj_repr = str(obj) msg_dict = { "name": opts.verbose_name, "obj": obj_repr, } # Here, we distinguish between different save types by checking for # the presence of keys in request.POST. if IS_POPUP_VAR in request.POST: to_field = request.POST.get(TO_FIELD_VAR) if to_field: attr = str(to_field) else: attr = obj._meta.pk.attname value = obj.serializable_value(attr) popup_response_data = json.dumps( { "value": str(value), "obj": str(obj), } ) return TemplateResponse( request, self.popup_response_template or [ "admin/%s/%s/popup_response.html" % (opts.app_label, opts.model_name), "admin/%s/popup_response.html" % opts.app_label, "admin/popup_response.html", ], { "popup_response_data": popup_response_data, }, ) elif "_continue" in request.POST or ( # Redirecting after "Save as new". "_saveasnew" in request.POST and self.save_as_continue and self.has_change_permission(request, obj) ): msg = _("The {name} “{obj}” was added successfully.") if self.has_change_permission(request, obj): msg += " " + _("You may edit it again below.") self.message_user(request, format_html(msg, **msg_dict), messages.SUCCESS) if post_url_continue is None: post_url_continue = obj_url post_url_continue = add_preserved_filters( {"preserved_filters": preserved_filters, "opts": opts}, post_url_continue, ) return HttpResponseRedirect(post_url_continue) elif "_addanother" in request.POST: msg = format_html( _( "The {name} “{obj}” was added successfully. You may add another {name} below." ), **msg_dict, ) self.message_user(request, msg, messages.SUCCESS) redirect_url = request.path redirect_url = add_preserved_filters( {"preserved_filters": preserved_filters, "opts": opts}, redirect_url ) return HttpResponseRedirect(redirect_url) else: msg = format_html( _("The {name} “{obj}” was added successfully."), **msg_dict ) self.message_user(request, msg, messages.SUCCESS) return self.response_post_save_add(request, obj) ``` ###Assistant : Determine the HttpResponse for the add_view stage.
2,623
```Python
def test_tika_parse_unreachable(self):
    html = '<html><head><meta http-equiv="content-type" content="text/html; charset=UTF-8"></head><body><p>Some Text</p></body></html>'

    # Check if exception is raised when Tika cannot be reached.
    self.parser.tika_server = ""
    self.assertRaises(ParseError, self.parser.tika_parse, html)
```
GIVEN:
    - Fresh start
WHEN:
    - tika parsing is called but tika is not available
THEN:
    - a ParseError Exception is thrown
22
26
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_tika_parse_unreachable(self): html = '<html><head><meta http-equiv="content-type" content="text/html; charset=UTF-8"></head><body><p>Some Text</p></body></html>' # Check if exception is raised when Tika cannot be reached. self.parser.tika_server = "" self.assertRaises(ParseError, self.parser.tika_parse, html) ``` ###Assistant : GIVEN: - Fresh start WHEN: - tika parsing is called but tika is not available THEN: - a ParseError Exception is thrown
2,624
```Python
def test_read_config_file_2():
    tpot_obj = TPOTRegressor()
    assert_raises(ValueError, tpot_obj._read_config_file, "tests/test_config.py.bad")
```
Assert that _read_config_file raises ValueError with a wrong dictionary format
9
8
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_read_config_file_2(): tpot_obj = TPOTRegressor() assert_raises(ValueError, tpot_obj._read_config_file, "tests/test_config.py.bad") ``` ###Assistant : Assert that _read_config_file rasies ValueError with wrong dictionary format
2,625
```Python
def pre_delete_handler(self, sender, instance, **kwargs):
    key = self.get_key_for_instance(instance)
    object_type = instance._meta.verbose_name

    # Delete an existing object
    logger.debug(f"[{self.branch}] Staging deletion of {object_type} {instance} (PK: {instance.pk})")
    self.queue[key] = (ChangeActionChoices.ACTION_DELETE, None)
```
Hooks to the pre_delete signal when a branch is active to queue delete actions.
14
28
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def pre_delete_handler(self, sender, instance, **kwargs): key = self.get_key_for_instance(instance) object_type = instance._meta.verbose_name # Delete an existing object logger.debug(f"[{self.branch}] Staging deletion of {object_type} {instance} (PK: {instance.pk})") self.queue[key] = (ChangeActionChoices.ACTION_DELETE, None) ``` ###Assistant : Hooks to the pre_delete signal when a branch is active to queue delete actions.
2,626
```Python
def _reset_state(self):
    self.cache = {}
    self.resolved_nodes = 0
    self.finished_last_inference = True

    # maps DAGNode uuid to unique instance of a gradio block
    self.node_to_block: Dict[DAGNode, Any] = {}
    # maps InputAttributeNodes to unique instance of interactive gradio block
    self.input_key_to_blocks: Dict[int, Any] = {}
```
Resets state for each new RayServeHandle representing a new DAG.
10
42
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _reset_state(self): self.cache = {} self.resolved_nodes = 0 self.finished_last_inference = True # maps DAGNode uuid to unique instance of a gradio block self.node_to_block: Dict[DAGNode, Any] = {} # maps InputAttributeNodes to unique instance of interactive gradio block self.input_key_to_blocks: Dict[int, Any] = {} ``` ###Assistant : Resets state for each new RayServeHandle representing a new DAG.
2,627
```Python
def find_element(self, selector):
    return self.driver.find_element(By.CSS_SELECTOR, selector)
```
find_element returns the first found element matching the css `selector`; a shortcut to `driver.find_element(By.CSS_SELECTOR, ...)`.
14
6
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def find_element(self, selector): return self.driver.find_element(By.CSS_SELECTOR, selector) ``` ###Assistant : find_element returns the first found element by the css `selector` shortcut to `driver.find_element(By.CSS_SELECTOR, ...)`.
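Illustrative use of the underlying Selenium call the helper wraps; this sketch assumes a working Chrome driver and network access, and the URL and selector are placeholders:

```Python
from selenium import webdriver
from selenium.webdriver.common.by import By

driver = webdriver.Chrome()  # any WebDriver implementation works
driver.get("https://example.com")

# What the helper forwards to: first element matching the CSS selector.
heading = driver.find_element(By.CSS_SELECTOR, "h1")
print(heading.text)

driver.quit()
```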
2,628
```Python
def test_bad_persist_value(self):
    with self.assertRaises(StreamlitAPIException) as e:
```
Throw an error if an invalid value is passed to 'persist'.
11
6
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_bad_persist_value(self): with self.assertRaises(StreamlitAPIException) as e: ``` ###Assistant : Throw an error if an invalid value is passed to 'persist'.
2,629
```Python
def _getDataFileTagsOptionHelp():
    return % ", ".join(
        "'%s' (%s)" % d for d in data_files_tags
    )

data_file_tags_option = data_group.add_option("--data-file-tags", action="append", dest="data_file_tags", metavar="DATA_TAGS", default=[])
parser.add_option_group(data_group)

execute_group = OptionGroup(parser, "Immediate execution after compilation")
execute_group.add_option("--run", action="store_true", dest="immediate_execution", default=is_nuitka_run, help= % ("on" if is_nuitka_run else "off"))
execute_group.add_option("--debugger", "--gdb", action="store_true", dest="debugger", default=False, help=)
execute_group.add_option("--execute-with-pythonpath", action="store_true", dest="keep_pythonpath", default=False, help=)
parser.add_option_group(execute_group)

dump_group = OptionGroup(parser, "Dump options for internal tree")
dump_group.add_option("--xml", action="store_true", dest="dump_xml", default=False, help="Dump the final result of optimization as XML, then exit.")
parser.add_option_group(dump_group)

codegen_group = OptionGroup(parser, "Code generation choices")
codegen_group.add_option("--disable-bytecode-cache", action="store_true", dest="disable_bytecode_cache", default=False, help=)
codegen_group.add_option("--full-compat", action="store_false", dest="improved", default=True, help=)
codegen_group.add_option("--file-reference-choice", action="store", dest="file_reference_mode", metavar="MODE", choices=("original", "runtime", "frozen"), default=None, help=)
codegen_group.add_option("--module-name-choice", action="store", dest="module_name_mode", metavar="MODE", choices=("original", "runtime"), default=None, help=)
parser.add_option_group(codegen_group)

output_group = OptionGroup(parser, "Output choices")
output_group.add_option("-o", action="store", dest="output_filename", metavar="FILENAME", default=None, help= % "<program_name>" + (".exe" if getOS() == "Windows" else ".bin"))
output_group.add_option("--output-dir", action="store", dest="output_dir", metavar="DIRECTORY", default="", help=)
output_group.add_option("--remove-output", action="store_true", dest="remove_build", default=False, help=)
output_group.add_option("--no-pyi-file", action="store_false", dest="pyi_file", default=True, help=)
parser.add_option_group(output_group)

debug_group = OptionGroup(parser, "Debug features")
debug_group.add_option("--debug", action="store_true", dest="debug", default=False, help=)
debug_group.add_option("--unstripped", action="store_true", dest="unstripped", default=False, help=)
debug_group.add_option("--profile", action="store_true", dest="profile", default=False, help=)
debug_group.add_option("--internal-graph", action="store_true", dest="graph", default=False, help=)
debug_group.add_option("--trace-execution", action="store_true", dest="trace_execution", default=False, help=)
debug_group.add_option("--recompile-c-only", action="store_true", dest="recompile_c_only", default=False, help=)
debug_group.add_option("--generate-c-only", action="store_true", dest="generate_c_only", default=False, help=)
debug_group.add_option("--experimental", action="append", dest="experimental", metavar="FLAG", default=[], help=)
debug_group.add_option("--explain-imports", action="store_true", dest="explain_imports", default=False, help=SUPPRESS_HELP)
debug_group.add_option("--low-memory", action="store_true", dest="low_memory", default=False, help=)

if os.name == "nt":
    debug_group.add_option("--disable-dll-dependency-cache", action="store_true", dest="no_dependency_cache", default=False, help=)
    debug_group.add_option("--force-dll-dependency-cache-update", action="store_true", dest="update_dependency_cache", default=False, help=)

# This is for testing framework, "coverage.py" hates to loose the process. And
# we can use it to make sure it's not done unknowingly.
parser.add_option("--must-not-re-execute", action="store_false", dest="allow_reexecute", default=True, help=SUPPRESS_HELP)
parser.add_option_group(debug_group)

c_compiler_group = OptionGroup(parser, "Backend C compiler choice")
c_compiler_group.add_option("--clang", action="store_true", dest="clang", default=False, help=)
c_compiler_group.add_option("--mingw64", action="store_true", dest="mingw64", default=False, help=)
c_compiler_group.add_option("--msvc", action="store", dest="msvc_version", default=None, help=)
c_compiler_group.add_option("-j", "--jobs", action="store", dest="jobs", metavar="N", default=None, help=)
c_compiler_group.add_option("--lto", action="store", dest="lto", metavar="choice", default="auto", choices=("yes", "no", "auto"), help=)
c_compiler_group.add_option("--static-libpython", action="store", dest="static_libpython", metavar="choice", default="auto", choices=("yes", "no", "auto"), help=)
c_compiler_group.add_option("--disable-ccache", action="store_true", dest="disable_ccache", default=False, help=)
parser.add_option_group(c_compiler_group)

pgo_group = OptionGroup(parser, "PGO compilation choices")
pgo_group.add_option("--pgo", action="store_true", dest="is_c_pgo", default=False, help=)
pgo_group.add_option("--pgo-python", action="store_true", dest="is_python_pgo", default=False, help=SUPPRESS_HELP)
pgo_group.add_option("--pgo-python-input", action="store", dest="python_pgo_input", default=None, help=SUPPRESS_HELP)
pgo_group.add_option("--pgo-python-policy-unused-module", action="store", dest="python_pgo_policy_unused_module", choices=("include", "exclude", "bytecode"), default="include", help=SUPPRESS_HELP)
pgo_group.add_option("--pgo-args", action="store", dest="pgo_args", default="", help=)
pgo_group.add_option("--pgo-executable", action="store", dest="pgo_executable", default=None, help=)
parser.add_option_group(pgo_group)

tracing_group = OptionGroup(parser, "Tracing features")
tracing_group.add_option("--quiet", action="store_true", dest="quiet", default=False, help=)
tracing_group.add_option("--show-scons", action="store_true", dest="show_scons", default=False, help=)
tracing_group.add_option("--show-progress", action="store_true", dest="show_progress", default=False, help=)
tracing_group.add_option("--no-progressbar", action="store_false", dest="progress_bar", default=True, help=)
tracing_group.add_option("--show-memory", action="store_true", dest="show_memory", default=False, help=)
tracing_group.add_option("--show-modules", action="store_true", dest="show_inclusion", default=False, help=)
tracing_group.add_option("--show-modules-output", action="store", dest="show_inclusion_output", metavar="PATH", default=None, help=)
tracing_group.add_option("--report", action="store", dest="compilation_report_filename", default=None, help=)
tracing_group.add_option("--verbose", action="store_true", dest="verbose", default=False, help=)
tracing_group.add_option("--verbose-output", action="store", dest="verbose_output", metavar="PATH", default=None, help=)
parser.add_option_group(tracing_group)

windows_group = OptionGroup(parser, "Windows specific controls")
```
windows_group.add_option( "--windows-dependency-tool", action="store", dest="dependency_tool", default=None, help=SUPPRESS_HELP, ) windows_group.add_option( "--windows-disable-console", action="store_true", dest="disable_console", default=False, help=, ) windows_group.add_option( "--windows-icon-from-ico", action="append", dest="icon_path", metavar="ICON_PATH", default=[], help=, ) windows_group.add_option( "--windows-icon-from-exe", action="store", dest="icon_exe_path", metavar="ICON_EXE_PATH", default=None, help="Copy executable icons from this existing executable (Windows only).", ) windows_group.add_option( "--onefile-windows-splash-screen-image", action="store", dest="splash_screen_image", default=None, help=, ) windows_group.add_option( "--windows-uac-admin", action="store_true", dest="windows_uac_admin", metavar="WINDOWS_UAC_ADMIN", default=False, help="Request Windows User Control, to grant admin rights on execution. (Windows only). Defaults to off.", ) windows_group.add_option( "--windows-uac-uiaccess", action="store_true", dest="windows_uac_uiaccess", metavar="WINDOWS_UAC_UIACCESS", default=False, help=, ) windows_group.add_option( "--windows-company-name", action="store", dest="windows_company_name", metavar="WINDOWS_COMPANY_NAME", default=None, help=, ) windows_group.add_option( "--windows-product-name", action="store", dest="windows_product_name", metavar="WINDOWS_PRODUCT_NAME", default=None, help=, ) windows_group.add_option( "--windows-file-version", action="store", dest="windows_file_version", metavar="WINDOWS_FILE_VERSION", default=None, help=, ) windows_group.add_option( "--windows-product-version", action="store", dest="windows_product_version", metavar="WINDOWS_PRODUCT_VERSION", default=None, help=, ) windows_group.add_option( "--windows-file-description", action="store", dest="windows_file_description", metavar="WINDOWS_FILE_DESCRIPTION", default=None, help=, ) windows_group.add_option( "--windows-onefile-tempdir", "--onefile-tempdir", action="store_true", dest="is_onefile_tempdir", metavar="ONEFILE_TEMPDIR", default=False, help=SUPPRESS_HELP, ) windows_group.add_option( "--windows-onefile-tempdir-spec", "--onefile-tempdir-spec", action="store", dest="onefile_tempdir_spec", metavar="ONEFILE_TEMPDIR_SPEC", default=None, help=, ) windows_group.add_option( "--windows-force-stdout-spec", action="store", dest="force_stdout_spec", metavar="WINDOWS_FORCE_STDOUT_SPEC", default=None, help=, ) windows_group.add_option( "--windows-force-stderr-spec", action="store", dest="force_stderr_spec", metavar="WINDOWS_FORCE_STDERR_SPEC", default=None, help=, ) parser.add_option_group(windows_group) macos_group = OptionGroup(parser, "macOS specific controls") macos_group.add_option( "--macos-target-arch", action="store", dest="macos_target_arch", choices=("universal", "arm64", "x86_64"), metavar="MACOS_TARGET_ARCH", default=None, help=, ) macos_group.add_option( "--macos-disable-console", "--disable-console", action="store_true", dest="disable_console", default=False, help=, ) macos_group.add_option( "--macos-create-app-bundle", action="store_true", dest="macos_create_bundle", default=False, help=, ) macos_group.add_option( "--macos-onefile-icon", action="append", dest="icon_path", metavar="ICON_PATH", default=[], help="Add executable icon for binary to use. Can be given only one time. 
Defaults to Python icon if available.", ) macos_group.add_option( "--macos-signed-app-name", action="store", dest="macos_signed_app_name", metavar="MACOS_SIGNED_APP_NAME", default=None, help=, ) macos_group.add_option( "--macos-app-name", action="store", dest="macos_app_name", metavar="MACOS_APP_NAME", default=None, help=, ) macos_group.add_option( "--macos-app-version", action="store", dest="macos_app_version", metavar="MACOS_APP_VERSION", default=None, help=, ) parser.add_option_group(macos_group) linux_group = OptionGroup(parser, "Linux specific controls") linux_group.add_option( "--linux-onefile-icon", action="append", dest="icon_path", metavar="ICON_PATH", default=[], help="Add executable icon for onefile binary to use. Can be given only one time. Defaults to Python icon if available.", ) linux_group.add_option( "--linux-onefile-compression", action="store", dest="appimage_compression", choices=("gzip", "xz"), metavar="COMPRESSION", default="gzip", help="Compression method to use for Linux onefile builds. Defaults to gzip for faster decompression", ) parser.add_option_group(linux_group) plugin_group = OptionGroup(parser, "Plugin control") plugin_group.add_option( "--enable-plugin", "--plugin-enable", action="append", dest="plugins_enabled", metavar="PLUGIN_NAME", default=[], help=, ) plugin_group.add_option( "--disable-plugin", "--plugin-disable", action="append", dest="plugins_disabled", metavar="PLUGIN_NAME", default=[], help=, ) plugin_group.add_option( "--plugin-no-detection", action="store_false", dest="detect_missing_plugins", default=True, help=, ) plugin_group.add_option( "--plugin-list", action="store_true", dest="list_plugins", default=False, help=, ) parser.add_option_group(plugin_group) plugin_group.add_option( "--user-plugin", action="append", dest="user_plugins", metavar="PATH", default=[], help="The file name of user plugin. Can be given multiple times. Default empty.", ) plugin_group.add_option( "--persist-source-changes", action="store_true", dest="persist_source_changes", default=False, help=, )
\ For included data files, special handlings can be chosen. With the commercial plugins, e.g. files can be included directly in the binary. The list is completed by some plugins. With the current list of plugins, these are available: %s. The default is empty.\ Execute immediately the created binary (or import the compiled module). Defaults to %s.\ Execute inside a debugger, e.g. "gdb" or "lldb" to automatically get a stack trace. Defaults to off.\ When immediately executing the created binary (--execute), don't reset PYTHONPATH. When all modules are successfully included, you ought to not need PYTHONPATH anymore.\ Do not reuse dependency analysis results for modules, esp. from standard library, that are included as bytecode.\ Enforce absolute compatibility with CPython. Do not even allow minor deviations from CPython behavior, e.g. not having better tracebacks or exception messages which are not really incompatible, but only different. This is intended for tests only and should not be used for normal use.\ Select what value "__file__" is going to be. With "runtime" (default for standalone binary mode and module mode), the created binaries and modules, use the location of themselves to deduct the value of "__file__". Included packages pretend to be in directories below that location. This allows you to include data files in deployments. If you merely seek acceleration, it's better for you to use the "original" value, where the source files location will be used. With "frozen" a notation "<frozen module_name>" is used. For compatibility reasons, the "__file__" value will always have ".py" suffix independent of what it really is.\ Select what value "__name__" and "__package__" are going to be. With "runtime" (default for module mode), the created module, it uses the parent package to deduct the value of "__package__", to be fully compatible. This allows for more optimization to happen, but normally any package can be loaded into another one, but this will raise an import error when it detects that with "original" mode.\ Specify how the executable should be named. For extension modules there is no choice, also not for standalone mode and using it will be an error. This may include path information that needs to exist though. Defaults to '%s' on this platform. \ Specify where intermediate and final output files should be put. The DIRECTORY will be populated with C files, object files, etc. Defaults to current directory. \ Removes the build directory after producing the module or exe file. Defaults to off.\ Do not create a ".pyi" file for extension modules created by Nuitka. This is used to detect implicit imports. Defaults to off.\ Executing all self checks possible to find errors in Nuitka, do not use for production. Defaults to off.\ Keep debug info in the resulting object file for better debugger interaction. Defaults to off.\ Enable vmprof based profiling of time spent. Not working currently. Defaults to off.\ Create graph of optimization process internals, do not use for whole programs, but only for small test cases. Defaults to off.\ Traced execution output, output the line of code before executing it. Defaults to off.\ This is not incremental compilation, but for Nuitka development only. Takes existing files and simply compile them as C again. Allows compiling edited C files for quick debugging changes to the generated source, e.g. to see if code is passed by, values output, etc, Defaults to off. 
Depends on compiling Python source to determine which files it should look at.\ Generate only C source code, and do not compile it to binary or module. This is for debugging and code coverage analysis that doesn't waste CPU. Defaults to off. Do not think you can use this directly.\ Use features declared as 'experimental'. May have no effect if no experimental features are present in the code. Uses secret tags (check source) per experimented feature.\ Attempt to use less memory, by forking less C compilation jobs and using options that use less memory. For use on embedded machines. Use this in case of out of memory problems. Defaults to off.\ Disable the dependency walker cache. Will result in much longer times to create the distribution folder, but might be used in case the cache is suspect to cause errors. \ For an update of the dependency walker cache. Will result in much longer times to create the distribution folder, but might be used in case the cache is suspect to cause errors or known to need an update. \ Enforce the use of clang. On Windows this requires a working Visual Studio version to piggy back on. Defaults to off.\ Enforce the use of MinGW64 on Windows. Defaults to off.\ Enforce the use of specific MSVC version on Windows. Allowed values are e.g. "14.3" (MSVC 2022) and other MSVC version numbers, specify "list" for a list of installed compilers, or use "latest". Defaults to latest MSVC being used if installed, otherwise MinGW64 is used.\ Specify the allowed number of parallel C compiler jobs. Defaults to the system CPU count.\ Use link time optimizations (MSVC, gcc, clang). Allowed values are "yes", "no", and "auto" (when it's known to work). Defaults to "auto".\ Use static link library of Python. Allowed values are "yes", "no", and "auto" (when it's known to work). Defaults to "auto".\ Do not attempt to use ccache (gcc, clang, etc.) or clcache (MSVC, clangcl).\ Enables C level profile guided optimization (PGO), by executing a dedicated build first for a profiling run, and then using the result to feedback into the C compilation. Note: This is experimental and not working with standalone modes of Nuitka yet. Defaults to off.\ Arguments to be passed in case of profile guided optimization. These are passed to the special built executable during the PGO profiling run. Default empty.\ Command to execute when collecting profile information. Use this only, if you need to launch it through a script that prepares it to run. Default use created program.\ Disable all information outputs, but show warnings. Defaults to off.\ Operate Scons in non-quiet mode, showing the executed commands. Defaults to off.Provide progress information and statistics. Defaults to off.Disable progress bar outputs (if tqdm is installed). Defaults to off.Provide memory information and statistics. Defaults to off.\ Provide information for included modules and DLLs Defaults to off.\ Where to output --show-modules, should be a filename. Default is standard output.\ Report module inclusion in an XML output file. Default is off.\ Output details of actions taken, esp. in optimizations. Can become a lot. Defaults to off.\ Where to output --verbose, should be a filename. Default is standard output.\ When compiling for Windows, disable the console window. Defaults to off.\ Add executable icon. Can be given multiple times for different resolutions or files with multiple icons inside. 
In the later case, you may also suffix with #<n> where n is an integer index starting from 1, specifying a specific icon to be included, and all others to be ignored.\ When compiling for Windows and onefile, show this while loading the application. Defaults to off.\ Request Windows User Control, to enforce running from a few folders only, remote desktop access. (Windows only). Defaults to off.\ Name of the company to use in Windows Version information. One of file or product version is required, when a version resource needs to be added, e.g. to specify product name, or company name. Defaults to unused.\ Name of the product to use in Windows Version information. Defaults to base filename of the binary.\ File version to use in Windows Version information. Must be a sequence of up to 4 numbers, e.g. 1.0.0.0, only this format is allowed. One of file or product version is required, when a version resource needs to be added, e.g. to specify product name, or company name. Defaults to unused.\ Product version to use in Windows Version information. Must be a sequence of up to 4 numbers, e.g. 1.0.0.0, only this format is allowed. One of file or product version is required, when a version resource needs to be added, e.g. to specify product name, or company name. Defaults to unused.\ Description of the file use in Windows Version information. One of file or product version is required, when a version resource needs to be added, e.g. to specify product name, or company name. Defaults to nonsense.\ Use this as a temporary folder. Defaults to '%TEMP%\\onefile_%PID%_%TIME%', i.e. system temporary directory.\ Force standard output of the program to go to this location. Useful for programs with disabled console and programs using the Windows Services Plugin of Nuitka. Defaults to not active, use e.g. '%PROGRAM%.out.txt', i.e. file near your program.\ Force standard error of the program to go to this location. Useful for programs with disabled console and programs using the Windows Services Plugin of Nuitka. Defaults to not active, use e.g. '%PROGRAM%.err.txt', i.e. file near your program.\ What architectures is this to supposed to run on. Default and limit is what the running Python allows for. Default is "native" which is the architecture the Python is run with.\ When compiling for macOS, disable the console window and create a GUI application. Defaults to off.\ When compiling for macOS, create a bundle rather than a plain binary application. Currently experimental and incomplete. Currently this is the only way to unlock disabling of console.Defaults to off.\ Name of the application to use for macOS signing. Follow com.yourcompany.appname naming results for best results, as these have to be globally unique, and will grant protected API accesses.\ Name of the product to use in macOS bundle information. Defaults to base filename of the binary.\ Product version to use in macOS bundle information. Defaults to 1.0 if not given.\ Enabled plugins. Must be plug-in names. Use --plugin-list to query the full list and exit. Default empty.\ Disabled plugins. Must be plug-in names. Use --plugin-list to query the full list and exit. Default empty.\ Plugins can detect if they might be used, and the you can disable the warning via "--disable-plugin=plugin-that-warned", or you can use this option to disable the mechanism entirely, which also speeds up compilation slightly of course as this detection code is run in vain once you are certain of which plugins to use. 
Defaults to off.\ Show list of all available plugins and exit. Defaults to off.\ Write source changes to original Python files. Use with care. May need permissions, best for use in a virtualenv to debug if plugin code changes work with standard Python or to benefit from bloat removal even with pure Python. Default False.
1,740
859
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _getDataFileTagsOptionHelp(): return % ", ".join( "'%s' (%s)" % d for d in data_files_tags ) data_file_tags_option = data_group.add_option( "--data-file-tags", action="append", dest="data_file_tags", metavar="DATA_TAGS", default=[], ) parser.add_option_group(data_group) execute_group = OptionGroup(parser, "Immediate execution after compilation") execute_group.add_option( "--run", action="store_true", dest="immediate_execution", default=is_nuitka_run, help= % ("on" if is_nuitka_run else "off"), ) execute_group.add_option( "--debugger", "--gdb", action="store_true", dest="debugger", default=False, help=, ) execute_group.add_option( "--execute-with-pythonpath", action="store_true", dest="keep_pythonpath", default=False, help=, ) parser.add_option_group(execute_group) dump_group = OptionGroup(parser, "Dump options for internal tree") dump_group.add_option( "--xml", action="store_true", dest="dump_xml", default=False, help="Dump the final result of optimization as XML, then exit.", ) parser.add_option_group(dump_group) codegen_group = OptionGroup(parser, "Code generation choices") codegen_group.add_option( "--disable-bytecode-cache", action="store_true", dest="disable_bytecode_cache", default=False, help=, ) codegen_group.add_option( "--full-compat", action="store_false", dest="improved", default=True, help=, ) codegen_group.add_option( "--file-reference-choice", action="store", dest="file_reference_mode", metavar="MODE", choices=("original", "runtime", "frozen"), default=None, help=, ) codegen_group.add_option( "--module-name-choice", action="store", dest="module_name_mode", metavar="MODE", choices=("original", "runtime"), default=None, help=, ) parser.add_option_group(codegen_group) output_group = OptionGroup(parser, "Output choices") output_group.add_option( "-o", action="store", dest="output_filename", metavar="FILENAME", default=None, help= % "<program_name>" + (".exe" if getOS() == "Windows" else ".bin"), ) output_group.add_option( "--output-dir", action="store", dest="output_dir", metavar="DIRECTORY", default="", help=, ) output_group.add_option( "--remove-output", action="store_true", dest="remove_build", default=False, help=, ) output_group.add_option( "--no-pyi-file", action="store_false", dest="pyi_file", default=True, help=, ) parser.add_option_group(output_group) debug_group = OptionGroup(parser, "Debug features") debug_group.add_option( "--debug", action="store_true", dest="debug", default=False, help=, ) debug_group.add_option( "--unstripped", action="store_true", dest="unstripped", default=False, help=, ) debug_group.add_option( "--profile", action="store_true", dest="profile", default=False, help=, ) debug_group.add_option( "--internal-graph", action="store_true", dest="graph", default=False, help=, ) debug_group.add_option( "--trace-execution", action="store_true", dest="trace_execution", default=False, help=, ) debug_group.add_option( "--recompile-c-only", action="store_true", dest="recompile_c_only", default=False, help=, ) debug_group.add_option( "--generate-c-only", action="store_true", dest="generate_c_only", default=False, help=, ) debug_group.add_option( "--experimental", action="append", dest="experimental", metavar="FLAG", default=[], help=, ) debug_group.add_option( "--explain-imports", action="store_true", dest="explain_imports", default=False, help=SUPPRESS_HELP, ) debug_group.add_option( "--low-memory", action="store_true", dest="low_memory", 
default=False, help=, ) if os.name == "nt": debug_group.add_option( "--disable-dll-dependency-cache", action="store_true", dest="no_dependency_cache", default=False, help=, ) debug_group.add_option( "--force-dll-dependency-cache-update", action="store_true", dest="update_dependency_cache", default=False, help=, ) # This is for testing framework, "coverage.py" hates to loose the process. And # we can use it to make sure it's not done unknowingly. parser.add_option( "--must-not-re-execute", action="store_false", dest="allow_reexecute", default=True, help=SUPPRESS_HELP, ) parser.add_option_group(debug_group) c_compiler_group = OptionGroup(parser, "Backend C compiler choice") c_compiler_group.add_option( "--clang", action="store_true", dest="clang", default=False, help=, ) c_compiler_group.add_option( "--mingw64", action="store_true", dest="mingw64", default=False, help=, ) c_compiler_group.add_option( "--msvc", action="store", dest="msvc_version", default=None, help=, ) c_compiler_group.add_option( "-j", "--jobs", action="store", dest="jobs", metavar="N", default=None, help=, ) c_compiler_group.add_option( "--lto", action="store", dest="lto", metavar="choice", default="auto", choices=("yes", "no", "auto"), help=, ) c_compiler_group.add_option( "--static-libpython", action="store", dest="static_libpython", metavar="choice", default="auto", choices=("yes", "no", "auto"), help=, ) c_compiler_group.add_option( "--disable-ccache", action="store_true", dest="disable_ccache", default=False, help=, ) parser.add_option_group(c_compiler_group) pgo_group = OptionGroup(parser, "PGO compilation choices") pgo_group.add_option( "--pgo", action="store_true", dest="is_c_pgo", default=False, help=, ) pgo_group.add_option( "--pgo-python", action="store_true", dest="is_python_pgo", default=False, help=SUPPRESS_HELP, ) pgo_group.add_option( "--pgo-python-input", action="store", dest="python_pgo_input", default=None, help=SUPPRESS_HELP, ) pgo_group.add_option( "--pgo-python-policy-unused-module", action="store", dest="python_pgo_policy_unused_module", choices=("include", "exclude", "bytecode"), default="include", help=SUPPRESS_HELP, ) pgo_group.add_option( "--pgo-args", action="store", dest="pgo_args", default="", help=, ) pgo_group.add_option( "--pgo-executable", action="store", dest="pgo_executable", default=None, help=, ) parser.add_option_group(pgo_group) tracing_group = OptionGroup(parser, "Tracing features") tracing_group.add_option( "--quiet", action="store_true", dest="quiet", default=False, help=, ) tracing_group.add_option( "--show-scons", action="store_true", dest="show_scons", default=False, help=, ) tracing_group.add_option( "--show-progress", action="store_true", dest="show_progress", default=False, help=, ) tracing_group.add_option( "--no-progressbar", action="store_false", dest="progress_bar", default=True, help=, ) tracing_group.add_option( "--show-memory", action="store_true", dest="show_memory", default=False, help=, ) tracing_group.add_option( "--show-modules", action="store_true", dest="show_inclusion", default=False, help=, ) tracing_group.add_option( "--show-modules-output", action="store", dest="show_inclusion_output", metavar="PATH", default=None, help=, ) tracing_group.add_option( "--report", action="store", dest="compilation_report_filename", default=None, help=, ) tracing_group.add_option( "--verbose", action="store_true", dest="verbose", default=False, help=, ) tracing_group.add_option( "--verbose-output", action="store", dest="verbose_output", metavar="PATH", default=None, help=, ) 
parser.add_option_group(tracing_group) windows_group = OptionGroup(parser, "Windows specific controls") windows_group.add_option( "--windows-dependency-tool", action="store", dest="dependency_tool", default=None, help=SUPPRESS_HELP, ) windows_group.add_option( "--windows-disable-console", action="store_true", dest="disable_console", default=False, help=, ) windows_group.add_option( "--windows-icon-from-ico", action="append", dest="icon_path", metavar="ICON_PATH", default=[], help=, ) windows_group.add_option( "--windows-icon-from-exe", action="store", dest="icon_exe_path", metavar="ICON_EXE_PATH", default=None, help="Copy executable icons from this existing executable (Windows only).", ) windows_group.add_option( "--onefile-windows-splash-screen-image", action="store", dest="splash_screen_image", default=None, help=, ) windows_group.add_option( "--windows-uac-admin", action="store_true", dest="windows_uac_admin", metavar="WINDOWS_UAC_ADMIN", default=False, help="Request Windows User Control, to grant admin rights on execution. (Windows only). Defaults to off.", ) windows_group.add_option( "--windows-uac-uiaccess", action="store_true", dest="windows_uac_uiaccess", metavar="WINDOWS_UAC_UIACCESS", default=False, help=, ) windows_group.add_option( "--windows-company-name", action="store", dest="windows_company_name", metavar="WINDOWS_COMPANY_NAME", default=None, help=, ) windows_group.add_option( "--windows-product-name", action="store", dest="windows_product_name", metavar="WINDOWS_PRODUCT_NAME", default=None, help=, ) windows_group.add_option( "--windows-file-version", action="store", dest="windows_file_version", metavar="WINDOWS_FILE_VERSION", default=None, help=, ) windows_group.add_option( "--windows-product-version", action="store", dest="windows_product_version", metavar="WINDOWS_PRODUCT_VERSION", default=None, help=, ) windows_group.add_option( "--windows-file-description", action="store", dest="windows_file_description", metavar="WINDOWS_FILE_DESCRIPTION", default=None, help=, ) windows_group.add_option( "--windows-onefile-tempdir", "--onefile-tempdir", action="store_true", dest="is_onefile_tempdir", metavar="ONEFILE_TEMPDIR", default=False, help=SUPPRESS_HELP, ) windows_group.add_option( "--windows-onefile-tempdir-spec", "--onefile-tempdir-spec", action="store", dest="onefile_tempdir_spec", metavar="ONEFILE_TEMPDIR_SPEC", default=None, help=, ) windows_group.add_option( "--windows-force-stdout-spec", action="store", dest="force_stdout_spec", metavar="WINDOWS_FORCE_STDOUT_SPEC", default=None, help=, ) windows_group.add_option( "--windows-force-stderr-spec", action="store", dest="force_stderr_spec", metavar="WINDOWS_FORCE_STDERR_SPEC", default=None, help=, ) parser.add_option_group(windows_group) macos_group = OptionGroup(parser, "macOS specific controls") macos_group.add_option( "--macos-target-arch", action="store", dest="macos_target_arch", choices=("universal", "arm64", "x86_64"), metavar="MACOS_TARGET_ARCH", default=None, help=, ) macos_group.add_option( "--macos-disable-console", "--disable-console", action="store_true", dest="disable_console", default=False, help=, ) macos_group.add_option( "--macos-create-app-bundle", action="store_true", dest="macos_create_bundle", default=False, help=, ) macos_group.add_option( "--macos-onefile-icon", action="append", dest="icon_path", metavar="ICON_PATH", default=[], help="Add executable icon for binary to use. Can be given only one time. 
Defaults to Python icon if available.", ) macos_group.add_option( "--macos-signed-app-name", action="store", dest="macos_signed_app_name", metavar="MACOS_SIGNED_APP_NAME", default=None, help=, ) macos_group.add_option( "--macos-app-name", action="store", dest="macos_app_name", metavar="MACOS_APP_NAME", default=None, help=, ) macos_group.add_option( "--macos-app-version", action="store", dest="macos_app_version", metavar="MACOS_APP_VERSION", default=None, help=, ) parser.add_option_group(macos_group) linux_group = OptionGroup(parser, "Linux specific controls") linux_group.add_option( "--linux-onefile-icon", action="append", dest="icon_path", metavar="ICON_PATH", default=[], help="Add executable icon for onefile binary to use. Can be given only one time. Defaults to Python icon if available.", ) linux_group.add_option( "--linux-onefile-compression", action="store", dest="appimage_compression", choices=("gzip", "xz"), metavar="COMPRESSION", default="gzip", help="Compression method to use for Linux onefile builds. Defaults to gzip for faster decompression", ) parser.add_option_group(linux_group) plugin_group = OptionGroup(parser, "Plugin control") plugin_group.add_option( "--enable-plugin", "--plugin-enable", action="append", dest="plugins_enabled", metavar="PLUGIN_NAME", default=[], help=, ) plugin_group.add_option( "--disable-plugin", "--plugin-disable", action="append", dest="plugins_disabled", metavar="PLUGIN_NAME", default=[], help=, ) plugin_group.add_option( "--plugin-no-detection", action="store_false", dest="detect_missing_plugins", default=True, help=, ) plugin_group.add_option( "--plugin-list", action="store_true", dest="list_plugins", default=False, help=, ) parser.add_option_group(plugin_group) plugin_group.add_option( "--user-plugin", action="append", dest="user_plugins", metavar="PATH", default=[], help="The file name of user plugin. Can be given multiple times. Default empty.", ) plugin_group.add_option( "--persist-source-changes", action="store_true", dest="persist_source_changes", default=False, help=, ) ``` ###Assistant : \ For included data files, special handlings can be chosen. With the commercial plugins, e.g. files can be included directly in the binary. The list is completed by some plugins. With the current list of plugins, these are available: %s. The default is empty.\ Execute immediately the created binary (or import the compiled module). Defaults to %s.\ Execute inside a debugger, e.g. "gdb" or "lldb" to automatically get a stack trace. Defaults to off.\ When immediately executing the created binary (--execute), don't reset PYTHONPATH. When all modules are successfully included, you ought to not need PYTHONPATH anymore.\ Do not reuse dependency analysis results for modules, esp. from standard library, that are included as bytecode.\ Enforce absolute compatibility with CPython. Do not even allow minor deviations from CPython behavior, e.g. not having better tracebacks or exception messages which are not really incompatible, but only different. This is intended for tests only and should not be used for normal use.\ Select what value "__file__" is going to be. With "runtime" (default for standalone binary mode and module mode), the created binaries and modules, use the location of themselves to deduct the value of "__file__". Included packages pretend to be in directories below that location. This allows you to include data files in deployments. 
If you merely seek acceleration, it's better for you to use the "original" value, where the source files location will be used. With "frozen" a notation "<frozen module_name>" is used. For compatibility reasons, the "__file__" value will always have ".py" suffix independent of what it really is.\ Select what value "__name__" and "__package__" are going to be. With "runtime" (default for module mode), the created module, it uses the parent package to deduct the value of "__package__", to be fully compatible. This allows for more optimization to happen, but normally any package can be loaded into another one, but this will raise an import error when it detects that with "original" mode.\ Specify how the executable should be named. For extension modules there is no choice, also not for standalone mode and using it will be an error. This may include path information that needs to exist though. Defaults to '%s' on this platform. \ Specify where intermediate and final output files should be put. The DIRECTORY will be populated with C files, object files, etc. Defaults to current directory. \ Removes the build directory after producing the module or exe file. Defaults to off.\ Do not create a ".pyi" file for extension modules created by Nuitka. This is used to detect implicit imports. Defaults to off.\ Executing all self checks possible to find errors in Nuitka, do not use for production. Defaults to off.\ Keep debug info in the resulting object file for better debugger interaction. Defaults to off.\ Enable vmprof based profiling of time spent. Not working currently. Defaults to off.\ Create graph of optimization process internals, do not use for whole programs, but only for small test cases. Defaults to off.\ Traced execution output, output the line of code before executing it. Defaults to off.\ This is not incremental compilation, but for Nuitka development only. Takes existing files and simply compile them as C again. Allows compiling edited C files for quick debugging changes to the generated source, e.g. to see if code is passed by, values output, etc, Defaults to off. Depends on compiling Python source to determine which files it should look at.\ Generate only C source code, and do not compile it to binary or module. This is for debugging and code coverage analysis that doesn't waste CPU. Defaults to off. Do not think you can use this directly.\ Use features declared as 'experimental'. May have no effect if no experimental features are present in the code. Uses secret tags (check source) per experimented feature.\ Attempt to use less memory, by forking less C compilation jobs and using options that use less memory. For use on embedded machines. Use this in case of out of memory problems. Defaults to off.\ Disable the dependency walker cache. Will result in much longer times to create the distribution folder, but might be used in case the cache is suspect to cause errors. \ For an update of the dependency walker cache. Will result in much longer times to create the distribution folder, but might be used in case the cache is suspect to cause errors or known to need an update. \ Enforce the use of clang. On Windows this requires a working Visual Studio version to piggy back on. Defaults to off.\ Enforce the use of MinGW64 on Windows. Defaults to off.\ Enforce the use of specific MSVC version on Windows. Allowed values are e.g. "14.3" (MSVC 2022) and other MSVC version numbers, specify "list" for a list of installed compilers, or use "latest". 
Defaults to latest MSVC being used if installed, otherwise MinGW64 is used.\ Specify the allowed number of parallel C compiler jobs. Defaults to the system CPU count.\ Use link time optimizations (MSVC, gcc, clang). Allowed values are "yes", "no", and "auto" (when it's known to work). Defaults to "auto".\ Use static link library of Python. Allowed values are "yes", "no", and "auto" (when it's known to work). Defaults to "auto".\ Do not attempt to use ccache (gcc, clang, etc.) or clcache (MSVC, clangcl).\ Enables C level profile guided optimization (PGO), by executing a dedicated build first for a profiling run, and then using the result to feedback into the C compilation. Note: This is experimental and not working with standalone modes of Nuitka yet. Defaults to off.\ Arguments to be passed in case of profile guided optimization. These are passed to the special built executable during the PGO profiling run. Default empty.\ Command to execute when collecting profile information. Use this only, if you need to launch it through a script that prepares it to run. Default use created program.\ Disable all information outputs, but show warnings. Defaults to off.\ Operate Scons in non-quiet mode, showing the executed commands. Defaults to off.Provide progress information and statistics. Defaults to off.Disable progress bar outputs (if tqdm is installed). Defaults to off.Provide memory information and statistics. Defaults to off.\ Provide information for included modules and DLLs Defaults to off.\ Where to output --show-modules, should be a filename. Default is standard output.\ Report module inclusion in an XML output file. Default is off.\ Output details of actions taken, esp. in optimizations. Can become a lot. Defaults to off.\ Where to output --verbose, should be a filename. Default is standard output.\ When compiling for Windows, disable the console window. Defaults to off.\ Add executable icon. Can be given multiple times for different resolutions or files with multiple icons inside. In the later case, you may also suffix with #<n> where n is an integer index starting from 1, specifying a specific icon to be included, and all others to be ignored.\ When compiling for Windows and onefile, show this while loading the application. Defaults to off.\ Request Windows User Control, to enforce running from a few folders only, remote desktop access. (Windows only). Defaults to off.\ Name of the company to use in Windows Version information. One of file or product version is required, when a version resource needs to be added, e.g. to specify product name, or company name. Defaults to unused.\ Name of the product to use in Windows Version information. Defaults to base filename of the binary.\ File version to use in Windows Version information. Must be a sequence of up to 4 numbers, e.g. 1.0.0.0, only this format is allowed. One of file or product version is required, when a version resource needs to be added, e.g. to specify product name, or company name. Defaults to unused.\ Product version to use in Windows Version information. Must be a sequence of up to 4 numbers, e.g. 1.0.0.0, only this format is allowed. One of file or product version is required, when a version resource needs to be added, e.g. to specify product name, or company name. Defaults to unused.\ Description of the file use in Windows Version information. One of file or product version is required, when a version resource needs to be added, e.g. to specify product name, or company name. 
Defaults to nonsense.\ Use this as a temporary folder. Defaults to '%TEMP%\\onefile_%PID%_%TIME%', i.e. system temporary directory.\ Force standard output of the program to go to this location. Useful for programs with disabled console and programs using the Windows Services Plugin of Nuitka. Defaults to not active, use e.g. '%PROGRAM%.out.txt', i.e. file near your program.\ Force standard error of the program to go to this location. Useful for programs with disabled console and programs using the Windows Services Plugin of Nuitka. Defaults to not active, use e.g. '%PROGRAM%.err.txt', i.e. file near your program.\ What architectures is this to supposed to run on. Default and limit is what the running Python allows for. Default is "native" which is the architecture the Python is run with.\ When compiling for macOS, disable the console window and create a GUI application. Defaults to off.\ When compiling for macOS, create a bundle rather than a plain binary application. Currently experimental and incomplete. Currently this is the only way to unlock disabling of console.Defaults to off.\ Name of the application to use for macOS signing. Follow com.yourcompany.appname naming results for best results, as these have to be globally unique, and will grant protected API accesses.\ Name of the product to use in macOS bundle information. Defaults to base filename of the binary.\ Product version to use in macOS bundle information. Defaults to 1.0 if not given.\ Enabled plugins. Must be plug-in names. Use --plugin-list to query the full list and exit. Default empty.\ Disabled plugins. Must be plug-in names. Use --plugin-list to query the full list and exit. Default empty.\ Plugins can detect if they might be used, and the you can disable the warning via "--disable-plugin=plugin-that-warned", or you can use this option to disable the mechanism entirely, which also speeds up compilation slightly of course as this detection code is run in vain once you are certain of which plugins to use. Defaults to off.\ Show list of all available plugins and exit. Defaults to off.\ Write source changes to original Python files. Use with care. May need permissions, best for use in a virtualenv to debug if plugin code changes work with standard Python or to benefit from bloat removal even with pure Python. Default False.
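The record above is dominated by `optparse` boilerplate whose multi-line help strings were extracted into the docs column (hence the empty `help=,` arguments in the code cell). A minimal, runnable sketch of the underlying `OptionGroup` pattern — the group and option names below are illustrative, not Nuitka's:

```python
from optparse import OptionParser, OptionGroup, SUPPRESS_HELP

parser = OptionParser()

# Group related flags so they appear together in --help output.
debug_group = OptionGroup(parser, "Debug features")
debug_group.add_option(
    "--trace",
    action="store_true",
    dest="trace",
    default=False,
    help="Trace execution. Defaults to off.",
)
debug_group.add_option(
    "--internal-flag",
    action="store_true",
    dest="internal_flag",
    default=False,
    help=SUPPRESS_HELP,  # hidden from --help output, as in the record
)
parser.add_option_group(debug_group)

options, args = parser.parse_args(["--trace"])
assert options.trace is True
```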
2,630
def test_collect_workflow_action_data_post(self): response = self.client.post( reverse( "wagtailadmin_pages:collect_workflow_action_data", args=( self.page.id, "approve", self.page.current_workflow_task_state.id, ), ), {"comment": "This is my comment"}, ) self.assertEqual(response.status_code, 200) response_json = json.loads(response.content) self.assertEqual(response_json["step"], "success") self.assertEqual( response_json["cleaned_data"], {"comment": "This is my comment"} )
This tests that a POST request to the collect_workflow_action_data view (for the approve action) returns a modal response with the validated data
22
34
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_collect_workflow_action_data_post(self): response = self.client.post( reverse( "wagtailadmin_pages:collect_workflow_action_data", args=( self.page.id, "approve", self.page.current_workflow_task_state.id, ), ), {"comment": "This is my comment"}, ) self.assertEqual(response.status_code, 200) response_json = json.loads(response.content) self.assertEqual(response_json["step"], "success") self.assertEqual( response_json["cleaned_data"], {"comment": "This is my comment"} ) ``` ###Assistant : This tests that a POST request to the collect_workflow_action_data view (for the approve action) returns a modal response with the validated data
2,631
def test_02_train_predictor(self): query = f response = self.handler.native_query(query) self.assertTrue(response.type == RESPONSE_TYPE.OK)
CREATE PREDICTOR {self.test_model_1} FROM {PG_HANDLER_NAME} (SELECT * FROM {self.data_table_1} limit 50) PREDICT rental_price
13
11
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_02_train_predictor(self): query = f response = self.handler.native_query(query) self.assertTrue(response.type == RESPONSE_TYPE.OK) ``` ###Assistant : CREATE PREDICTOR {self.test_model_1} FROM {PG_HANDLER_NAME} (SELECT * FROM {self.data_table_1} limit 50) PREDICT rental_price
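For the record above, the docs column holds the body of the f-string assigned to `query` (the dataset strips string literals out of the code cell). A hedged sketch of how the rendered SQL might look, using placeholder identifiers — the real fixture names are not shown in the record:

```python
# Illustrative placeholder values; the actual test fixture defines its own.
test_model_1 = "home_rentals_model"
PG_HANDLER_NAME = "example_postgres"
data_table_1 = "demo_data.home_rentals"

query = f"""
    CREATE PREDICTOR {test_model_1}
    FROM {PG_HANDLER_NAME} (SELECT * FROM {data_table_1} limit 50)
    PREDICT rental_price
"""
print(query)
```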
2,632
def __call__(self, results): img = results['img'] if self.to_float32: img = img.astype(np.float32) results['img_path'] = None results['img'] = img height, width = img.shape[:2] results['height'] = height results['width'] = width results['ori_height'] = height results['ori_width'] = width return results @TRANSFORMS.register_module()
Call functions to add image meta information. Args: results (dict): Result dict with Webcam read image in ``results['img']``. Returns: dict: The dict contains loaded image and meta information.
28
36
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def __call__(self, results): img = results['img'] if self.to_float32: img = img.astype(np.float32) results['img_path'] = None results['img'] = img height, width = img.shape[:2] results['height'] = height results['width'] = width results['ori_height'] = height results['ori_width'] = width return results @TRANSFORMS.register_module() ``` ###Assistant : Call functions to add image meta information. Args: results (dict): Result dict with Webcam read image in ``results['img']``. Returns: dict: The dict contains loaded image and meta information.
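To see the record's meta-filling behaviour end to end, here is a standalone re-implementation driven by a synthetic frame. The function name is ours; only the logic mirrors the record (the real class lives in mmdet's transform registry):

```python
import numpy as np

def add_image_meta(results, to_float32=True):
    # Same steps as the record's __call__: optional dtype cast, then
    # record height/width twice (current and "original") plus a null path.
    img = results["img"]
    if to_float32:
        img = img.astype(np.float32)
    results["img_path"] = None
    results["img"] = img
    height, width = img.shape[:2]
    results["height"] = height
    results["width"] = width
    results["ori_height"] = height
    results["ori_width"] = width
    return results

frame = np.zeros((480, 640, 3), dtype=np.uint8)  # stand-in for a webcam frame
out = add_image_meta({"img": frame})
assert out["width"] == 640 and out["img"].dtype == np.float32
```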
2,633
def test_jemalloc_env_var_propagate(): gcs_ptype = ray.ray_constants.PROCESS_TYPE_GCS_SERVER expected = {} actual = ray._private.services.propagate_jemalloc_env_var( jemalloc_path="", jemalloc_conf="", jemalloc_comps=[], process_type=gcs_ptype ) assert actual == expected actual = ray._private.services.propagate_jemalloc_env_var( jemalloc_path=None, jemalloc_conf="a,b,c", jemalloc_comps=[ray.ray_constants.PROCESS_TYPE_GCS_SERVER], process_type=gcs_ptype, ) assert actual == expected library_path = "/abc" expected = {"LD_PRELOAD": library_path} actual = ray._private.services.propagate_jemalloc_env_var( jemalloc_path=library_path, jemalloc_conf="", jemalloc_comps=[ray.ray_constants.PROCESS_TYPE_GCS_SERVER], process_type=gcs_ptype, ) assert actual == expected # comps should be a list type. with pytest.raises(AssertionError): ray._private.services.propagate_jemalloc_env_var( jemalloc_path=library_path, jemalloc_conf="", jemalloc_comps="ray.ray_constants.PROCESS_TYPE_GCS_SERVER,", process_type=gcs_ptype, ) # When comps don't match the process_type, it should return an empty dict. expected = {} actual = ray._private.services.propagate_jemalloc_env_var( jemalloc_path=library_path, jemalloc_conf="", jemalloc_comps=[ray.ray_constants.PROCESS_TYPE_RAYLET], process_type=gcs_ptype, ) library_path = "/abc" malloc_conf = "a,b,c" expected = {"LD_PRELOAD": library_path, "MALLOC_CONF": malloc_conf} actual = ray._private.services.propagate_jemalloc_env_var( jemalloc_path=library_path, jemalloc_conf=malloc_conf, jemalloc_comps=[ray.ray_constants.PROCESS_TYPE_GCS_SERVER], process_type=gcs_ptype, ) assert actual == expected
Test `propagate_jemalloc_env_var`: if the shared library path is not specified, it should return an empty dict. Further cases: when the shared library is specified, and when the malloc config is specified.
28
114
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_jemalloc_env_var_propagate(): gcs_ptype = ray.ray_constants.PROCESS_TYPE_GCS_SERVER expected = {} actual = ray._private.services.propagate_jemalloc_env_var( jemalloc_path="", jemalloc_conf="", jemalloc_comps=[], process_type=gcs_ptype ) assert actual == expected actual = ray._private.services.propagate_jemalloc_env_var( jemalloc_path=None, jemalloc_conf="a,b,c", jemalloc_comps=[ray.ray_constants.PROCESS_TYPE_GCS_SERVER], process_type=gcs_ptype, ) assert actual == expected library_path = "/abc" expected = {"LD_PRELOAD": library_path} actual = ray._private.services.propagate_jemalloc_env_var( jemalloc_path=library_path, jemalloc_conf="", jemalloc_comps=[ray.ray_constants.PROCESS_TYPE_GCS_SERVER], process_type=gcs_ptype, ) assert actual == expected # comps should be a list type. with pytest.raises(AssertionError): ray._private.services.propagate_jemalloc_env_var( jemalloc_path=library_path, jemalloc_conf="", jemalloc_comps="ray.ray_constants.PROCESS_TYPE_GCS_SERVER,", process_type=gcs_ptype, ) # When comps don't match the process_type, it should return an empty dict. expected = {} actual = ray._private.services.propagate_jemalloc_env_var( jemalloc_path=library_path, jemalloc_conf="", jemalloc_comps=[ray.ray_constants.PROCESS_TYPE_RAYLET], process_type=gcs_ptype, ) library_path = "/abc" malloc_conf = "a,b,c" expected = {"LD_PRELOAD": library_path, "MALLOC_CONF": malloc_conf} actual = ray._private.services.propagate_jemalloc_env_var( jemalloc_path=library_path, jemalloc_conf=malloc_conf, jemalloc_comps=[ray.ray_constants.PROCESS_TYPE_GCS_SERVER], process_type=gcs_ptype, ) assert actual == expected ``` ###Assistant : Test `propagate_jemalloc_env_var` If the shared library path is not specified, it should return an empty dict. When the shared library is specified When the malloc config is specified
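The assertions in the record above fully pin down the expected behaviour, so it can be reconstructed as a standalone sketch. The semantics below are inferred from the test, not copied from Ray's source:

```python
def propagate_jemalloc_env_var(jemalloc_path, jemalloc_conf,
                               jemalloc_comps, process_type):
    # The test expects a non-list comps argument to trip an assertion.
    assert isinstance(jemalloc_comps, list)
    # No shared library, or this component not opted in: nothing to set.
    if not jemalloc_path or process_type not in jemalloc_comps:
        return {}
    env = {"LD_PRELOAD": jemalloc_path}
    if jemalloc_conf:
        env["MALLOC_CONF"] = jemalloc_conf
    return env

# Spot checks matching the record's expectations:
assert propagate_jemalloc_env_var("", "", [], "gcs_server") == {}
assert propagate_jemalloc_env_var("/abc", "", ["gcs_server"], "gcs_server") == {
    "LD_PRELOAD": "/abc"
}
assert propagate_jemalloc_env_var("/abc", "a,b,c", ["gcs_server"], "gcs_server") == {
    "LD_PRELOAD": "/abc",
    "MALLOC_CONF": "a,b,c",
}
```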
2,634
def aug_test_bboxes(self, feats, img_metas, rescale=False): # check with_nms argument gb_sig = signature(self.get_results) gb_args = [p.name for p in gb_sig.parameters.values()] gbs_sig = signature(self._get_results_single) gbs_args = [p.name for p in gbs_sig.parameters.values()] assert ('with_nms' in gb_args) and ('with_nms' in gbs_args), \ f'{self.__class__.__name__}' \ ' does not support test-time augmentation' aug_bboxes = [] aug_scores = [] aug_labels = [] for x, img_meta in zip(feats, img_metas): # only one image in the batch outs = self.forward(x) bbox_outputs = self.get_results( *outs, img_metas=img_meta, cfg=self.test_cfg, rescale=False, with_nms=False)[0] aug_bboxes.append(bbox_outputs.bboxes) aug_scores.append(bbox_outputs.scores) if len(bbox_outputs) >= 3: aug_labels.append(bbox_outputs.labels) # after merging, bboxes will be rescaled to the original image size merged_bboxes, merged_scores = self.merge_aug_bboxes( aug_bboxes, aug_scores, img_metas) merged_labels = torch.cat(aug_labels, dim=0) if aug_labels else None if merged_bboxes.numel() == 0: det_bboxes = torch.cat([merged_bboxes, merged_scores[:, None]], -1) return [ (det_bboxes, merged_labels), ] det_bboxes, keep_idxs = batched_nms(merged_bboxes, merged_scores, merged_labels, self.test_cfg.nms) det_bboxes = det_bboxes[:self.test_cfg.max_per_img] det_labels = merged_labels[keep_idxs][:self.test_cfg.max_per_img] if rescale: _det_bboxes = det_bboxes else: _det_bboxes = det_bboxes.clone() _det_bboxes[:, :4] *= det_bboxes.new_tensor( img_metas[0][0]['scale_factor']) results = InstanceData() results.bboxes = _det_bboxes[:, :4] results.scores = _det_bboxes[:, 4] results.labels = det_labels return [results]
Test det bboxes with test time augmentation, can be applied in DenseHead except for ``RPNHead`` and its variants, e.g., ``GARPNHead``, etc. Args: feats (list[Tensor]): the outer list indicates test-time augmentations and inner Tensor should have a shape NxCxHxW, which contains features for all images in the batch. img_metas (list[list[dict]]): the outer list indicates test-time augs (multiscale, flip, etc.) and the inner list indicates images in a batch. each dict has image information. rescale (bool, optional): Whether to rescale the results. Defaults to False. Returns: list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. The first item is ``bboxes`` with shape (n, 5), where 5 represent (tl_x, tl_y, br_x, br_y, score). The shape of the second tensor in the tuple is ``labels`` with shape (n,). The length of list should always be 1.
131
171
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def aug_test_bboxes(self, feats, img_metas, rescale=False): # check with_nms argument gb_sig = signature(self.get_results) gb_args = [p.name for p in gb_sig.parameters.values()] gbs_sig = signature(self._get_results_single) gbs_args = [p.name for p in gbs_sig.parameters.values()] assert ('with_nms' in gb_args) and ('with_nms' in gbs_args), \ f'{self.__class__.__name__}' \ ' does not support test-time augmentation' aug_bboxes = [] aug_scores = [] aug_labels = [] for x, img_meta in zip(feats, img_metas): # only one image in the batch outs = self.forward(x) bbox_outputs = self.get_results( *outs, img_metas=img_meta, cfg=self.test_cfg, rescale=False, with_nms=False)[0] aug_bboxes.append(bbox_outputs.bboxes) aug_scores.append(bbox_outputs.scores) if len(bbox_outputs) >= 3: aug_labels.append(bbox_outputs.labels) # after merging, bboxes will be rescaled to the original image size merged_bboxes, merged_scores = self.merge_aug_bboxes( aug_bboxes, aug_scores, img_metas) merged_labels = torch.cat(aug_labels, dim=0) if aug_labels else None if merged_bboxes.numel() == 0: det_bboxes = torch.cat([merged_bboxes, merged_scores[:, None]], -1) return [ (det_bboxes, merged_labels), ] det_bboxes, keep_idxs = batched_nms(merged_bboxes, merged_scores, merged_labels, self.test_cfg.nms) det_bboxes = det_bboxes[:self.test_cfg.max_per_img] det_labels = merged_labels[keep_idxs][:self.test_cfg.max_per_img] if rescale: _det_bboxes = det_bboxes else: _det_bboxes = det_bboxes.clone() _det_bboxes[:, :4] *= det_bboxes.new_tensor( img_metas[0][0]['scale_factor']) results = InstanceData() results.bboxes = _det_bboxes[:, :4] results.scores = _det_bboxes[:, 4] results.labels = det_labels return [results] ``` ###Assistant : Test det bboxes with test time augmentation, can be applied in DenseHead except for ``RPNHead`` and its variants, e.g., ``GARPNHead``, etc. Args: feats (list[Tensor]): the outer list indicates test-time augmentations and inner Tensor should have a shape NxCxHxW, which contains features for all images in the batch. img_metas (list[list[dict]]): the outer list indicates test-time augs (multiscale, flip, etc.) and the inner list indicates images in a batch. each dict has image information. rescale (bool, optional): Whether to rescale the results. Defaults to False. Returns: list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. The first item is ``bboxes`` with shape (n, 5), where 5 represent (tl_x, tl_y, br_x, br_y, score). The shape of the second tensor in the tuple is ``labels`` with shape (n,). The length of list should always be 1.
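The merge step that `merge_aug_bboxes` performs can be illustrated with the horizontal-flip case: boxes predicted on the flipped image are mapped back to original coordinates before concatenation and NMS. A conceptual sketch assuming PyTorch is available — this is not mmdet's implementation:

```python
import torch

def unflip_boxes(bboxes, img_width):
    # (x1, y1, x2, y2) boxes from a horizontally flipped image map back via
    # x' = img_width - x, with x1/x2 swapped to keep x1 <= x2.
    out = bboxes.clone()
    out[:, 0] = img_width - bboxes[:, 2]  # new x1 from old x2
    out[:, 2] = img_width - bboxes[:, 0]  # new x2 from old x1
    return out

orig = torch.tensor([[10.0, 20.0, 50.0, 60.0]])
flipped_pred = torch.tensor([[590.0, 20.0, 630.0, 60.0]])  # same object, flipped
merged = torch.cat([orig, unflip_boxes(flipped_pred, img_width=640)], dim=0)
print(merged)  # both rows now agree on the original coordinates
```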
2,635
def pauseProducing(self) -> None: logger.info("[%s] Pause producing", self.id()) self.state = ConnectionStates.PAUSED
This is called when both the kernel send buffer and the twisted tcp connection send buffers have become full. We don't actually have any control over those sizes, so we buffer some commands ourselves before knifing the connection due to the remote failing to keep up.
46
11
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def pauseProducing(self) -> None: logger.info("[%s] Pause producing", self.id()) self.state = ConnectionStates.PAUSED ``` ###Assistant : This is called when both the kernel send buffer and the twisted tcp connection send buffers have become full. We don't actually have any control over those sizes, so we buffer some commands ourselves before knifing the connection due to the remote failing to keep up.
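The docstring describes classic producer backpressure: buffer commands while the transport is saturated, then flush on resume. A plain-Python sketch of that state machine, without Twisted's transport machinery (all names illustrative):

```python
from enum import Enum, auto

class ConnectionStates(Enum):
    CONNECTED = auto()
    PAUSED = auto()

class BufferingSender:
    def __init__(self):
        self.state = ConnectionStates.CONNECTED
        self._buffer = []

    def pauseProducing(self):
        # Called when the kernel/transport send buffers are full.
        self.state = ConnectionStates.PAUSED

    def resumeProducing(self):
        # The peer caught up: flush everything buffered while paused.
        self.state = ConnectionStates.CONNECTED
        pending, self._buffer = self._buffer, []
        for cmd in pending:
            self.send(cmd)

    def send(self, cmd):
        if self.state is ConnectionStates.PAUSED:
            self._buffer.append(cmd)  # hold commands until resume
        else:
            print("wrote", cmd)

s = BufferingSender()
s.pauseProducing()
s.send("PING")       # buffered, not written
s.resumeProducing()  # flushes: wrote PING
```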
2,636
def throw(self, typ, val=None, tb=None): if val is None: if tb is None: raise typ val = typ() if tb is not None: val = val.with_traceback(tb) raise val
Raise an exception in the coroutine. Return next yielded value or raise StopIteration.
13
28
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def throw(self, typ, val=None, tb=None): if val is None: if tb is None: raise typ val = typ() if tb is not None: val = val.with_traceback(tb) raise val ``` ###Assistant : Raise an exception in the coroutine. Return next yielded value or raise StopIteration.
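The method in the record mirrors the built-in `generator.throw()` protocol; the snippet below demonstrates that protocol directly on an ordinary generator:

```python
def gen():
    try:
        yield 1
    except ValueError:
        # The thrown exception is raised at the paused yield and caught here.
        yield "recovered"

g = gen()
assert next(g) == 1
assert g.throw(ValueError) == "recovered"
```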
2,637
def _exit_buffer(self) -> None: self._buffer_index -= 1 self._check_buffer()
Leave buffer context, and render content if required.
8
8
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _exit_buffer(self) -> None: self._buffer_index -= 1 self._check_buffer() ``` ###Assistant : Leave buffer context, and render content if required.
2,638
def flatten_sensors_data(sensor): if "temp" in sensor["data"]: sensor["data"]["temperature"] = sensor["data"]["temp"]["c"] return sensor
Deconstruct SwitchBot library temp object C/Fº readings from dictionary.
9
11
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def flatten_sensors_data(sensor): if "temp" in sensor["data"]: sensor["data"]["temperature"] = sensor["data"]["temp"]["c"] return sensor ``` ###Assistant : Deconstruct SwitchBot library temp object C/Fº readings from dictionary.
2,639
def test_shared_embedding_column_with_non_sequence_categorical(self): with tf.Graph().as_default(): vocabulary_size = 3 sparse_input_a = tf.compat.v1.SparseTensorValue( # example 0, ids [2] # example 1, ids [0, 1] indices=((0, 0), (1, 0), (1, 1)), values=(2, 0, 1), dense_shape=(2, 2), ) sparse_input_b = tf.compat.v1.SparseTensorValue( # example 0, ids [2] # example 1, ids [0, 1] indices=((0, 0), (1, 0), (1, 1)), values=(2, 0, 1), dense_shape=(2, 2), ) categorical_column_a = ( tf.feature_column.categorical_column_with_identity( key="aaa", num_buckets=vocabulary_size ) ) categorical_column_b = ( tf.feature_column.categorical_column_with_identity( key="bbb", num_buckets=vocabulary_size ) ) shared_embedding_columns = tf.feature_column.shared_embeddings( [categorical_column_a, categorical_column_b], dimension=2 ) sequence_input_layer = ksfc.SequenceFeatures( shared_embedding_columns ) with self.assertRaisesRegex( ValueError, r"In embedding_column: aaa_shared_embedding\. " r"categorical_column must " r"be of type SequenceCategoricalColumn to use " r"SequenceFeatures\.", ): _, _ = sequence_input_layer( {"aaa": sparse_input_a, "bbb": sparse_input_b} )
Tests that error is raised for non-sequence shared embedding column.
10
115
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_shared_embedding_column_with_non_sequence_categorical(self): with tf.Graph().as_default(): vocabulary_size = 3 sparse_input_a = tf.compat.v1.SparseTensorValue( # example 0, ids [2] # example 1, ids [0, 1] indices=((0, 0), (1, 0), (1, 1)), values=(2, 0, 1), dense_shape=(2, 2), ) sparse_input_b = tf.compat.v1.SparseTensorValue( # example 0, ids [2] # example 1, ids [0, 1] indices=((0, 0), (1, 0), (1, 1)), values=(2, 0, 1), dense_shape=(2, 2), ) categorical_column_a = ( tf.feature_column.categorical_column_with_identity( key="aaa", num_buckets=vocabulary_size ) ) categorical_column_b = ( tf.feature_column.categorical_column_with_identity( key="bbb", num_buckets=vocabulary_size ) ) shared_embedding_columns = tf.feature_column.shared_embeddings( [categorical_column_a, categorical_column_b], dimension=2 ) sequence_input_layer = ksfc.SequenceFeatures( shared_embedding_columns ) with self.assertRaisesRegex( ValueError, r"In embedding_column: aaa_shared_embedding\. " r"categorical_column must " r"be of type SequenceCategoricalColumn to use " r"SequenceFeatures\.", ): _, _ = sequence_input_layer( {"aaa": sparse_input_a, "bbb": sparse_input_b} ) ``` ###Assistant : Tests that error is raised for non-sequence shared embedding column.
2,640
def get_next_stock_market_days(last_stock_day, n_next_days) -> list: n_days = 0 l_pred_days = [] years: list = [] holidays: list = [] if isinstance(last_stock_day, datetime): while n_days < n_next_days: last_stock_day += timedelta(hours=24) year = last_stock_day.date().year if year not in years: years.append(year) holidays += us_market_holidays(year) # Check if it is a weekend if last_stock_day.date().weekday() > 4: continue # Check if it is a holiday if last_stock_day.strftime("%Y-%m-%d") in holidays: continue # Otherwise stock market is open n_days += 1 l_pred_days.append(last_stock_day) else: while n_days < n_next_days: l_pred_days.append(last_stock_day + 1 + n_days) n_days += 1 return l_pred_days
Gets the next stock market day. Checks against weekends and holidays
11
90
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_next_stock_market_days(last_stock_day, n_next_days) -> list: n_days = 0 l_pred_days = [] years: list = [] holidays: list = [] if isinstance(last_stock_day, datetime): while n_days < n_next_days: last_stock_day += timedelta(hours=24) year = last_stock_day.date().year if year not in years: years.append(year) holidays += us_market_holidays(year) # Check if it is a weekend if last_stock_day.date().weekday() > 4: continue # Check if it is a holiday if last_stock_day.strftime("%Y-%m-%d") in holidays: continue # Otherwise stock market is open n_days += 1 l_pred_days.append(last_stock_day) else: while n_days < n_next_days: l_pred_days.append(last_stock_day + 1 + n_days) n_days += 1 return l_pred_days ``` ###Assistant : Gets the next stock market day. Checks against weekends and holidays
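A self-contained simplification of the loop above, skipping weekends only; the holiday calendar from `us_market_holidays` is omitted, so this is a sketch, not the full behavior.

```Python
from datetime import datetime, timedelta

def next_weekdays(last_day, n):
    # Weekend check mirrors the original: weekday() > 4 means Sat/Sun.
    days = []
    while len(days) < n:
        last_day += timedelta(hours=24)
        if last_day.weekday() > 4:
            continue
        days.append(last_day)
    return days

print(next_weekdays(datetime(2021, 1, 1), 3))  # Mon Jan 4, Tue Jan 5, Wed Jan 6
```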
2,641
async def test_max_concurrent_in_progress_functions(extra_req_num): max_req = 10 a = A(max_num_call=max_req) # Run more than allowed concurrent async functions should trigger rate limiting res_arr = await asyncio.gather( *[a.fn1() if i % 2 == 0 else a.fn2() for i in range(max_req + extra_req_num)] ) fail_cnt = 0 for ok in res_arr: fail_cnt += 0 if ok else 1 expected_fail_cnt = max(0, extra_req_num) assert fail_cnt == expected_fail_cnt, ( f"{expected_fail_cnt} out of {max_req + extra_req_num} " f"concurrent runs should fail with max={max_req} but {fail_cnt}." ) assert a.num_call_ == 0, "All requests should be done" @pytest.mark.asyncio @pytest.mark.parametrize( "failures", [ [True, True, True, True, True], [False, False, False, False, False], [False, True, False, True, False], [False, False, False, True, True], [True, True, False, False, False], ], )
Test rate limiting for concurrent in-progress requests on StateHead
9
120
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_max_concurrent_in_progress_functions(extra_req_num): max_req = 10 a = A(max_num_call=max_req) # Run more than allowed concurrent async functions should trigger rate limiting res_arr = await asyncio.gather( *[a.fn1() if i % 2 == 0 else a.fn2() for i in range(max_req + extra_req_num)] ) fail_cnt = 0 for ok in res_arr: fail_cnt += 0 if ok else 1 expected_fail_cnt = max(0, extra_req_num) assert fail_cnt == expected_fail_cnt, ( f"{expected_fail_cnt} out of {max_req + extra_req_num} " f"concurrent runs should fail with max={max_req} but {fail_cnt}." ) assert a.num_call_ == 0, "All requests should be done" @pytest.mark.asyncio @pytest.mark.parametrize( "failures", [ [True, True, True, True, True], [False, False, False, False, False], [False, True, False, True, False], [False, False, False, True, True], [True, True, False, False, False], ], ) ``` ###Assistant : Test rate limiting for concurrent in-progress requests on StateHead
2,642
def transpose_qkv(X, num_heads): # Shape of input `X`: # (`batch_size`, no. of queries or key-value pairs, `num_hiddens`). # Shape of output `X`: # (`batch_size`, no. of queries or key-value pairs, `num_heads`, # `num_hiddens` / `num_heads`) X = X.reshape(X.shape[0], X.shape[1], num_heads, -1) # Shape of output `X`: # (`batch_size`, `num_heads`, no. of queries or key-value pairs, # `num_hiddens` / `num_heads`) X = X.transpose(0, 2, 1, 3) # Shape of `output`: # (`batch_size` * `num_heads`, no. of queries or key-value pairs, # `num_hiddens` / `num_heads`) return X.reshape(-1, X.shape[2], X.shape[3])
Transposition for parallel computation of multiple attention heads. Defined in :numref:`sec_multihead-attention`
11
87
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def transpose_qkv(X, num_heads): # Shape of input `X`: # (`batch_size`, no. of queries or key-value pairs, `num_hiddens`). # Shape of output `X`: # (`batch_size`, no. of queries or key-value pairs, `num_heads`, # `num_hiddens` / `num_heads`) X = X.reshape(X.shape[0], X.shape[1], num_heads, -1) # Shape of output `X`: # (`batch_size`, `num_heads`, no. of queries or key-value pairs, # `num_hiddens` / `num_heads`) X = X.transpose(0, 2, 1, 3) # Shape of `output`: # (`batch_size` * `num_heads`, no. of queries or key-value pairs, # `num_hiddens` / `num_heads`) return X.reshape(-1, X.shape[2], X.shape[3]) ``` ###Assistant : Transposition for parallel computation of multiple attention heads. Defined in :numref:`sec_multihead-attention`
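The shape comments above can be checked with a NumPy stand-in; the d2l code runs on framework tensors, but the reshape/transpose semantics are the same.

```Python
import numpy as np

batch_size, num_queries, num_hiddens, num_heads = 2, 4, 8, 2
X = np.zeros((batch_size, num_queries, num_hiddens))
X = X.reshape(X.shape[0], X.shape[1], num_heads, -1)  # (2, 4, 2, 4)
X = X.transpose(0, 2, 1, 3)                           # (2, 2, 4, 4)
out = X.reshape(-1, X.shape[2], X.shape[3])
print(out.shape)  # (4, 4, 4) == (batch_size * num_heads, queries, hiddens // heads)
```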
2,643
def current_state(self, session=NEW_SESSION) -> str: return ( session.query(TaskInstance.state) .filter( TaskInstance.dag_id == self.dag_id, TaskInstance.task_id == self.task_id, TaskInstance.run_id == self.run_id, ) .scalar() )
Get the very latest state from the database, if a session is passed, we use and looking up the state becomes part of the session, otherwise a new session is used. :param session: SQLAlchemy ORM Session
36
21
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def current_state(self, session=NEW_SESSION) -> str: return ( session.query(TaskInstance.state) .filter( TaskInstance.dag_id == self.dag_id, TaskInstance.task_id == self.task_id, TaskInstance.run_id == self.run_id, ) .scalar() ) ``` ###Assistant : Get the very latest state from the database, if a session is passed, we use and looking up the state becomes part of the session, otherwise a new session is used. :param session: SQLAlchemy ORM Session
2,644
def house_graph(create_using=None): description = [ "adjacencylist", "House Graph", 5, [[2, 3], [1, 4], [1, 4, 5], [2, 3, 5], [3, 4]], ] G = make_small_undirected_graph(description, create_using) return G
Returns the House graph (square with triangle on top) The house graph is a simple undirected graph with 5 nodes and 6 edges [1]_. Parameters ---------- create_using : NetworkX graph constructor, optional (default=nx.Graph) Graph type to create. If graph instance, then cleared before populated. Returns ------- G : networkx Graph House graph in the form of a square with a triangle on top References ---------- .. [1] https://mathworld.wolfram.com/HouseGraph.html
68
28
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def house_graph(create_using=None): description = [ "adjacencylist", "House Graph", 5, [[2, 3], [1, 4], [1, 4, 5], [2, 3, 5], [3, 4]], ] G = make_small_undirected_graph(description, create_using) return G ``` ###Assistant : Returns the House graph (square with triangle on top) The house graph is a simple undirected graph with 5 nodes and 6 edges [1]_. Parameters ---------- create_using : NetworkX graph constructor, optional (default=nx.Graph) Graph type to create. If graph instance, then cleared before populated. Returns ------- G : networkx Graph House graph in the form of a square with a triangle on top References ---------- .. [1] https://mathworld.wolfram.com/HouseGraph.html
2,645
def check_header_validity(header): name, value = header for part in header: if type(part) not in HEADER_VALIDATORS: raise InvalidHeader( f"Header part ({part!r}) from {{{name!r}: {value!r}}} must be " f"of type str or bytes, not {type(part)}" ) _validate_header_part(name, "name", HEADER_VALIDATORS[type(name)][0]) _validate_header_part(value, "value", HEADER_VALIDATORS[type(value)][1])
Verifies that header parts don't contain leading whitespace, reserved characters, or return characters. :param header: tuple, in the format (name, value).
21
40
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def check_header_validity(header): name, value = header for part in header: if type(part) not in HEADER_VALIDATORS: raise InvalidHeader( f"Header part ({part!r}) from {{{name!r}: {value!r}}} must be " f"of type str or bytes, not {type(part)}" ) _validate_header_part(name, "name", HEADER_VALIDATORS[type(name)][0]) _validate_header_part(value, "value", HEADER_VALIDATORS[type(value)][1]) ``` ###Assistant : Verifies that header parts don't contain leading whitespace, reserved characters, or return characters. :param header: tuple, in the format (name, value).
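A hedged usage sketch: recent versions of requests expose this helper as `requests.utils.check_header_validity`, and a carriage return in a header value should trip the validator.

```Python
import requests

try:
    # \r\n in the value is one of the forbidden return characters.
    requests.utils.check_header_validity(("X-Token", "abc\r\n"))
except requests.exceptions.InvalidHeader as exc:
    print(exc)
```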
2,646
def test_orderline_query(staff_api_client, permission_manage_orders, fulfilled_order): order = fulfilled_order query = line = order.lines.first() metadata_key = "md key" metadata_value = "md value" line.store_value_in_private_metadata({metadata_key: metadata_value}) line.store_value_in_metadata({metadata_key: metadata_value}) line.save() staff_api_client.user.user_permissions.add(permission_manage_orders) response = staff_api_client.post_graphql(query) content = get_graphql_content(response) order_data = content["data"]["orders"]["edges"][0]["node"] first_order_data_line = order_data["lines"][0] variant_id = graphene.Node.to_global_id("ProductVariant", line.variant.pk) assert first_order_data_line["thumbnail"] is None assert first_order_data_line["variant"]["id"] == variant_id assert first_order_data_line["quantity"] == line.quantity assert first_order_data_line["unitPrice"]["currency"] == line.unit_price.currency assert first_order_data_line["metadata"] == [ {"key": metadata_key, "value": metadata_value} ] assert first_order_data_line["privateMetadata"] == [ {"key": metadata_key, "value": metadata_value} ] expected_unit_price = Money( amount=str(first_order_data_line["unitPrice"]["gross"]["amount"]), currency="USD", ) assert first_order_data_line["totalPrice"]["currency"] == line.unit_price.currency assert expected_unit_price == line.unit_price.gross expected_total_price = Money( amount=str(first_order_data_line["totalPrice"]["gross"]["amount"]), currency="USD", ) assert expected_total_price == line.unit_price.gross * line.quantity allocation = line.allocations.first() allocation_id = graphene.Node.to_global_id("Allocation", allocation.pk) warehouse_id = graphene.Node.to_global_id( "Warehouse", allocation.stock.warehouse.pk ) assert first_order_data_line["allocations"] == [ { "id": allocation_id, "quantity": allocation.quantity_allocated, "warehouse": {"id": warehouse_id}, } ]
query OrdersQuery { orders(first: 1) { edges { node { lines { thumbnail(size: 540) { url } variant { id } quantity allocations { id quantity warehouse { id } } unitPrice { currency gross { amount } } totalPrice { currency gross { amount } } metadata { key value } privateMetadata { key value } } } } } }
62
129
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_orderline_query(staff_api_client, permission_manage_orders, fulfilled_order): order = fulfilled_order query = line = order.lines.first() metadata_key = "md key" metadata_value = "md value" line.store_value_in_private_metadata({metadata_key: metadata_value}) line.store_value_in_metadata({metadata_key: metadata_value}) line.save() staff_api_client.user.user_permissions.add(permission_manage_orders) response = staff_api_client.post_graphql(query) content = get_graphql_content(response) order_data = content["data"]["orders"]["edges"][0]["node"] first_order_data_line = order_data["lines"][0] variant_id = graphene.Node.to_global_id("ProductVariant", line.variant.pk) assert first_order_data_line["thumbnail"] is None assert first_order_data_line["variant"]["id"] == variant_id assert first_order_data_line["quantity"] == line.quantity assert first_order_data_line["unitPrice"]["currency"] == line.unit_price.currency assert first_order_data_line["metadata"] == [ {"key": metadata_key, "value": metadata_value} ] assert first_order_data_line["privateMetadata"] == [ {"key": metadata_key, "value": metadata_value} ] expected_unit_price = Money( amount=str(first_order_data_line["unitPrice"]["gross"]["amount"]), currency="USD", ) assert first_order_data_line["totalPrice"]["currency"] == line.unit_price.currency assert expected_unit_price == line.unit_price.gross expected_total_price = Money( amount=str(first_order_data_line["totalPrice"]["gross"]["amount"]), currency="USD", ) assert expected_total_price == line.unit_price.gross * line.quantity allocation = line.allocations.first() allocation_id = graphene.Node.to_global_id("Allocation", allocation.pk) warehouse_id = graphene.Node.to_global_id( "Warehouse", allocation.stock.warehouse.pk ) assert first_order_data_line["allocations"] == [ { "id": allocation_id, "quantity": allocation.quantity_allocated, "warehouse": {"id": warehouse_id}, } ] ``` ###Assistant : query OrdersQuery { orders(first: 1) { edges { node { lines { thumbnail(size: 540) { url } variant { id } quantity allocations { id quantity warehouse { id } } unitPrice { currency gross { amount } } totalPrice { currency gross { amount } } metadata { key value } privateMetadata { key value } } } } } }
2,647
def in4_pseudoheader(proto, u, plen): # type: (int, IP, int) -> bytes if u.len is not None: if u.ihl is None: olen = sum(len(x) for x in u.options) ihl = 5 + olen // 4 + (1 if olen % 4 else 0) else: ihl = u.ihl ln = max(u.len - 4 * ihl, 0) else: ln = plen # Filter out IPOption_LSRR and IPOption_SSRR sr_options = [opt for opt in u.options if isinstance(opt, IPOption_LSRR) or isinstance(opt, IPOption_SSRR)] len_sr_options = len(sr_options) if len_sr_options == 1 and len(sr_options[0].routers): # The checksum must be computed using the final # destination address u.dst = sr_options[0].routers[-1] elif len_sr_options > 1: message = "Found %d Source Routing Options! " message += "Falling back to IP.dst for checksum computation." warning(message, len_sr_options) return struct.pack("!4s4sHH", inet_pton(socket.AF_INET, u.src), inet_pton(socket.AF_INET, u.dst), proto, ln)
IPv4 Pseudo Header as defined in RFC793 as bytes :param proto: value of upper layer protocol :param u: IP layer instance :param plen: the length of the upper layer and payload
31
132
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def in4_pseudoheader(proto, u, plen): # type: (int, IP, int) -> bytes if u.len is not None: if u.ihl is None: olen = sum(len(x) for x in u.options) ihl = 5 + olen // 4 + (1 if olen % 4 else 0) else: ihl = u.ihl ln = max(u.len - 4 * ihl, 0) else: ln = plen # Filter out IPOption_LSRR and IPOption_SSRR sr_options = [opt for opt in u.options if isinstance(opt, IPOption_LSRR) or isinstance(opt, IPOption_SSRR)] len_sr_options = len(sr_options) if len_sr_options == 1 and len(sr_options[0].routers): # The checksum must be computed using the final # destination address u.dst = sr_options[0].routers[-1] elif len_sr_options > 1: message = "Found %d Source Routing Options! " message += "Falling back to IP.dst for checksum computation." warning(message, len_sr_options) return struct.pack("!4s4sHH", inet_pton(socket.AF_INET, u.src), inet_pton(socket.AF_INET, u.dst), proto, ln) ``` ###Assistant : IPv4 Pseudo Header as defined in RFC793 as bytes :param proto: value of upper layer protocol :param u: IP layer instance :param plen: the length of the upper layer and payload
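The final `struct.pack` call can be reproduced standalone; here is the pseudo header for a hypothetical TCP segment (proto 6) of 20 bytes between two made-up addresses.

```Python
import socket
import struct

ph = struct.pack("!4s4sHH",
                 socket.inet_aton("10.0.0.1"),  # source address
                 socket.inet_aton("10.0.0.2"),  # destination address
                 6,                             # upper layer protocol (TCP)
                 20)                            # upper layer length
print(ph.hex())  # 0a0000010a00000200060014
```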
2,648
def doc_resample_fillna(method, refer_to, params=None, overwrite_template_params=False): action = f"fill missing values in each group independently using {method} method" params_substitution = "limit : int\n" if params: params_substitution = ( params if overwrite_template_params else format_string( "{params}\n{params_substitution}", params=params, params_substitution=params_substitution, ) ) build_rules = "- QueryCompiler contains unsampled data with missing values filled." return doc_resample( action=action, extra_params=params_substitution, build_rules=build_rules, refer_to=refer_to, ) doc_dt = partial( doc_qc_method, template=, one_column_method=True, refer_to_module_name="Series.dt", ) doc_dt_timestamp = partial(doc_dt, dt_type="datetime") doc_dt_interval = partial(doc_dt, dt_type="interval") doc_dt_period = partial(doc_dt, dt_type="period") doc_dt_round = partial( doc_qc_method, template=, one_column_method=True, refer_to_module_name="Series.dt", ) doc_str_method = partial( doc_qc_method, template=, one_column_method=True, refer_to_module_name="Series.str", )
Build decorator which adds docstring for the resample fillna query compiler method. Parameters ---------- method : str Fillna method name. refer_to : str Method name in ``modin.pandas.resample.Resampler`` module to refer to for more information about parameters and output format. params : str, optional Method parameters in the NumPy docstyle format to substitute to the docstring template. overwrite_template_params : bool, default: False If `params` is specified indicates whether to overwrite method parameters in the docstring template or append then at the end. Returns ------- callable Get {prop} for each {dt_type} value. {params} Returns ------- BaseQueryCompiler New QueryCompiler with the same shape as `self`, where each element is {prop} for the corresponding {dt_type} value. Perform {refer_to} operation on the underlying time-series data to the specified `freq`. Parameters ---------- freq : str ambiguous : {{"raise", "infer", "NaT"}} or bool mask, default: "raise" nonexistent : {{"raise", "shift_forward", "shift_backward", "NaT"}} or timedelta, default: "raise" Returns ------- BaseQueryCompiler New QueryCompiler with performed {refer_to} operation on every element. Apply "{refer_to}" function to each string value in QueryCompiler. {params} Returns ------- BaseQueryCompiler New QueryCompiler containing the result of execution of the "{refer_to}" function against each string element.
189
91
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def doc_resample_fillna(method, refer_to, params=None, overwrite_template_params=False): action = f"fill missing values in each group independently using {method} method" params_substitution = "limit : int\n" if params: params_substitution = ( params if overwrite_template_params else format_string( "{params}\n{params_substitution}", params=params, params_substitution=params_substitution, ) ) build_rules = "- QueryCompiler contains unsampled data with missing values filled." return doc_resample( action=action, extra_params=params_substitution, build_rules=build_rules, refer_to=refer_to, ) doc_dt = partial( doc_qc_method, template=, one_column_method=True, refer_to_module_name="Series.dt", ) doc_dt_timestamp = partial(doc_dt, dt_type="datetime") doc_dt_interval = partial(doc_dt, dt_type="interval") doc_dt_period = partial(doc_dt, dt_type="period") doc_dt_round = partial( doc_qc_method, template=, one_column_method=True, refer_to_module_name="Series.dt", ) doc_str_method = partial( doc_qc_method, template=, one_column_method=True, refer_to_module_name="Series.str", ) ``` ###Assistant : Build decorator which adds docstring for the resample fillna query compiler method. Parameters ---------- method : str Fillna method name. refer_to : str Method name in ``modin.pandas.resample.Resampler`` module to refer to for more information about parameters and output format. params : str, optional Method parameters in the NumPy docstyle format to substitute to the docstring template. overwrite_template_params : bool, default: False If `params` is specified indicates whether to overwrite method parameters in the docstring template or append then at the end. Returns ------- callable Get {prop} for each {dt_type} value. {params} Returns ------- BaseQueryCompiler New QueryCompiler with the same shape as `self`, where each element is {prop} for the corresponding {dt_type} value. Perform {refer_to} operation on the underlying time-series data to the specified `freq`. Parameters ---------- freq : str ambiguous : {{"raise", "infer", "NaT"}} or bool mask, default: "raise" nonexistent : {{"raise", "shift_forward", "shift_backward", "NaT"}} or timedelta, default: "raise" Returns ------- BaseQueryCompiler New QueryCompiler with performed {refer_to} operation on every element. Apply "{refer_to}" function to each string value in QueryCompiler. {params} Returns ------- BaseQueryCompiler New QueryCompiler containing the result of execution of the "{refer_to}" function against each string element.
2,649
def delete_events(ref_type, ref_name): events = ( frappe.db.sql_list( , (ref_type, ref_name), ) or [] ) if events: frappe.delete_doc("Event", events, for_reload=True)
SELECT distinct `tabEvent`.name from `tabEvent`, `tabEvent Participants` where `tabEvent`.name = `tabEvent Participants`.parent and `tabEvent Participants`.reference_doctype = %s and `tabEvent Participants`.reference_docname = %s
22
19
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def delete_events(ref_type, ref_name): events = ( frappe.db.sql_list( , (ref_type, ref_name), ) or [] ) if events: frappe.delete_doc("Event", events, for_reload=True) ``` ###Assistant : SELECT distinct `tabEvent`.name from `tabEvent`, `tabEvent Participants` where `tabEvent`.name = `tabEvent Participants`.parent and `tabEvent Participants`.reference_doctype = %s and `tabEvent Participants`.reference_docname = %s
2,650
def check_started(self) -> ReplicaStartupStatus: status, version = self._actor.check_ready() if status == ReplicaStartupStatus.SUCCEEDED: # Re-assign DeploymentVersion if start / update / recover succeeded # by reading re-computed version in RayServeReplica if version is not None: self._version = version return status
Check if the replica has started. If so, transition to RUNNING. Should handle the case where the replica has already stopped. Returns: status: Most recent state of replica by querying actor obj ref
33
39
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def check_started(self) -> ReplicaStartupStatus: status, version = self._actor.check_ready() if status == ReplicaStartupStatus.SUCCEEDED: # Re-assign DeploymentVersion if start / update / recover succeeded # by reading re-computed version in RayServeReplica if version is not None: self._version = version return status ``` ###Assistant : Check if the replica has started. If so, transition to RUNNING. Should handle the case where the replica has already stopped. Returns: status: Most recent state of replica by querying actor obj ref
2,651
async def test_battery_low(hass, utcnow): helper = await setup_test_component( hass, create_battery_level_sensor, suffix="battery" ) state = await helper.async_update( ServicesTypes.BATTERY_SERVICE, { CharacteristicsTypes.BATTERY_LEVEL: 1, CharacteristicsTypes.STATUS_LO_BATT: 0, }, ) assert state.attributes["icon"] == "mdi:battery-10" state = await helper.async_update( ServicesTypes.BATTERY_SERVICE, { CharacteristicsTypes.BATTERY_LEVEL: 1, CharacteristicsTypes.STATUS_LO_BATT: 1, }, ) assert state.attributes["icon"] == "mdi:battery-alert"
Test reading a HomeKit battery's low-battery state.
10
44
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_battery_low(hass, utcnow): helper = await setup_test_component( hass, create_battery_level_sensor, suffix="battery" ) state = await helper.async_update( ServicesTypes.BATTERY_SERVICE, { CharacteristicsTypes.BATTERY_LEVEL: 1, CharacteristicsTypes.STATUS_LO_BATT: 0, }, ) assert state.attributes["icon"] == "mdi:battery-10" state = await helper.async_update( ServicesTypes.BATTERY_SERVICE, { CharacteristicsTypes.BATTERY_LEVEL: 1, CharacteristicsTypes.STATUS_LO_BATT: 1, }, ) assert state.attributes["icon"] == "mdi:battery-alert" ``` ###Assistant : Test reading a HomeKit battery's low-battery state.
2,652
def _hyab(self, y_true, y_pred): delta = y_true - y_pred root = K.sqrt(K.clip(K.pow(delta[..., 0:1], 2), self._epsilon, None)) delta_norm = frobenius_norm(delta[..., 1:3]) return root + delta_norm
Compute the HyAB distance between true and predicted images. Parameters ---------- y_true: :class:`plaidml.tile.Value` The ground truth batch of images in standard or Hunt-adjusted L*A*B* color space y_pred: :class:`plaidml.tile.Value` The predicted batch of images in standard or Hunt-adjusted L*A*B* color space Returns ------- :class:`plaidml.tile.Value` image tensor containing the per-pixel HyAB distances between true and predicted images
56
24
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _hyab(self, y_true, y_pred): delta = y_true - y_pred root = K.sqrt(K.clip(K.pow(delta[..., 0:1], 2), self._epsilon, None)) delta_norm = frobenius_norm(delta[..., 1:3]) return root + delta_norm ``` ###Assistant : Compute the HyAB distance between true and predicted images. Parameters ---------- y_true: :class:`plaidml.tile.Value` The ground truth batch of images in standard or Hunt-adjusted L*A*B* color space y_pred: :class:`plaidml.tile.Value` The predicted batch of images in standard or Hunt-adjusted L*A*B* color space Returns ------- :class:`plaidml.tile.Value` image tensor containing the per-pixel HyAB distances between true and predicted images
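Restated in NumPy as an assumption-flagged sketch: the formula is read off the code above as |ΔL*| + ||Δa*b*||₂ per pixel, outside the plaidml/Keras backend.

```Python
import numpy as np

def hyab(lab_true, lab_pred, eps=1e-15):
    delta = lab_true - lab_pred
    # |delta L*|, clipped away from zero as in the original
    root = np.sqrt(np.clip(delta[..., 0:1] ** 2, eps, None))
    # Euclidean norm over the a*/b* channels (the Frobenius norm above)
    ab_norm = np.linalg.norm(delta[..., 1:3], axis=-1, keepdims=True)
    return root + ab_norm

print(hyab(np.zeros((1, 2, 2, 3)), np.ones((1, 2, 2, 3))).shape)  # (1, 2, 2, 1)
```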
2,653
def test_dry_run(self, run_cleanup_mock, dry_run_arg, expected): args = self.parser.parse_args( [ 'db', 'clean', '--clean-before-timestamp', '2021-01-01', *dry_run_arg, ] ) db_command.cleanup_tables(args) run_cleanup_mock.assert_called_once_with( table_names=None, dry_run=expected, clean_before_timestamp=pendulum.parse('2021-01-01 00:00:00Z'), verbose=False, confirm=True, skip_archive=False, )
When a tz is included in the string, the default timezone should not be used.
13
26
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_dry_run(self, run_cleanup_mock, dry_run_arg, expected): args = self.parser.parse_args( [ 'db', 'clean', '--clean-before-timestamp', '2021-01-01', *dry_run_arg, ] ) db_command.cleanup_tables(args) run_cleanup_mock.assert_called_once_with( table_names=None, dry_run=expected, clean_before_timestamp=pendulum.parse('2021-01-01 00:00:00Z'), verbose=False, confirm=True, skip_archive=False, ) ``` ###Assistant : When a tz is included in the string, the default timezone should not be used.
2,654
def async_dismiss_setup_message(hass, entry_id): persistent_notification.async_dismiss(hass, entry_id)
Dismiss persistent notification and remove QR code.
7
5
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def async_dismiss_setup_message(hass, entry_id): persistent_notification.async_dismiss(hass, entry_id) ``` ###Assistant : Dismiss persistent notification and remove QR code.
2,655
def get_containing_app_config(self, object_name): self.check_apps_ready() candidates = [] for app_config in self.app_configs.values(): if object_name.startswith(app_config.name): subpath = object_name[len(app_config.name) :] if subpath == "" or subpath[0] == ".": candidates.append(app_config) if candidates: return sorted(candidates, key=lambda ac: -len(ac.name))[0]
Look for an app config containing a given object. object_name is the dotted Python path to the object. Return the app config for the inner application in case of nesting. Return None if the object isn't in any registered app config.
41
33
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_containing_app_config(self, object_name): self.check_apps_ready() candidates = [] for app_config in self.app_configs.values(): if object_name.startswith(app_config.name): subpath = object_name[len(app_config.name) :] if subpath == "" or subpath[0] == ".": candidates.append(app_config) if candidates: return sorted(candidates, key=lambda ac: -len(ac.name))[0] ``` ###Assistant : Look for an app config containing a given object. object_name is the dotted Python path to the object. Return the app config for the inner application in case of nesting. Return None if the object isn't in any registered app config.
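A toy run of the selection logic above, with made-up app names, showing why the sort key picks the innermost (longest) matching app.

```Python
app_names = ["shop", "shop.orders"]
object_name = "shop.orders.models.Order"

candidates = []
for name in app_names:
    if object_name.startswith(name):
        subpath = object_name[len(name):]
        # The prefix must end at a dotted-path boundary, as in the original.
        if subpath == "" or subpath[0] == ".":
            candidates.append(name)

print(sorted(candidates, key=lambda n: -len(n))[0])  # shop.orders
```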
2,656
def _track_variables(self, value): for val in tf.nest.flatten(value): if isinstance(val, tf.Variable): self._track_variable(val) elif tf_utils.is_extension_type(val): # Manually expand extension types to track resource variables. nested_vals = tf_utils.type_spec_from_value(val)._to_components( val ) self._track_variables(nested_vals)
Tracks `Variable`s including `Variable`s in `CompositeTensor`s.
6
28
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _track_variables(self, value): for val in tf.nest.flatten(value): if isinstance(val, tf.Variable): self._track_variable(val) elif tf_utils.is_extension_type(val): # Manually expand extension types to track resource variables. nested_vals = tf_utils.type_spec_from_value(val)._to_components( val ) self._track_variables(nested_vals) ``` ###Assistant : Tracks `Variable`s including `Variable`s in `CompositeTensor`s.
2,657
def _create_vhost_v2(self, node): addrs = set() for param in node.parameters: addr = obj.Addr.fromstring(param) if addr: addrs.add(addr) is_ssl = False # Exclusion to match the behavior in get_virtual_hosts_v2 sslengine = node.find_directives("SSLEngine", exclude=False) if sslengine: for directive in sslengine: if directive.parameters[0].lower() == "on": is_ssl = True break # "SSLEngine on" might be set outside of <VirtualHost> # Treat vhosts with port 443 as ssl vhosts for addr in addrs: if addr.get_port() == "443": is_ssl = True enabled = apache_util.included_in_paths(node.filepath, self.parsed_paths) macro = False # Check if the VirtualHost is contained in a mod_macro block if node.find_ancestors("Macro"): macro = True vhost = obj.VirtualHost( node.filepath, None, addrs, is_ssl, enabled, modmacro=macro, node=node ) self._populate_vhost_names_v2(vhost) return vhost
Used by get_virtual_hosts_v2 to create vhost objects using ParserNode interfaces. :param interfaces.BlockNode node: The BlockNode object of VirtualHost block :returns: newly created vhost :rtype: :class:`~certbot_apache.obj.VirtualHost`
25
111
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _create_vhost_v2(self, node): addrs = set() for param in node.parameters: addr = obj.Addr.fromstring(param) if addr: addrs.add(addr) is_ssl = False # Exclusion to match the behavior in get_virtual_hosts_v2 sslengine = node.find_directives("SSLEngine", exclude=False) if sslengine: for directive in sslengine: if directive.parameters[0].lower() == "on": is_ssl = True break # "SSLEngine on" might be set outside of <VirtualHost> # Treat vhosts with port 443 as ssl vhosts for addr in addrs: if addr.get_port() == "443": is_ssl = True enabled = apache_util.included_in_paths(node.filepath, self.parsed_paths) macro = False # Check if the VirtualHost is contained in a mod_macro block if node.find_ancestors("Macro"): macro = True vhost = obj.VirtualHost( node.filepath, None, addrs, is_ssl, enabled, modmacro=macro, node=node ) self._populate_vhost_names_v2(vhost) return vhost ``` ###Assistant : Used by get_virtual_hosts_v2 to create vhost objects using ParserNode interfaces. :param interfaces.BlockNode node: The BlockNode object of VirtualHost block :returns: newly created vhost :rtype: :class:`~certbot_apache.obj.VirtualHost`
2,658
def insert_on(self, path, loc=None, replace=False): loc = loc or self.location if not loc: return nloc = _normalize_cached(loc) bdir = os.path.dirname(nloc) npath = [(p and _normalize_cached(p) or p) for p in path] for p, item in enumerate(npath): if item == nloc: if replace: break else: # don't modify path (even removing duplicates) if # found and not replace return elif item == bdir and self.precedence == EGG_DIST: # if it's an .egg, give it precedence over its directory # UNLESS it's already been added to sys.path and replace=False if (not replace) and nloc in npath[p:]: return if path is sys.path: self.check_version_conflict() path.insert(p, loc) npath.insert(p, nloc) break else: if path is sys.path: self.check_version_conflict() if replace: path.insert(0, loc) else: path.append(loc) return # p is the spot where we found or inserted loc; now remove duplicates while True: try: np = npath.index(nloc, p + 1) except ValueError: break else: del npath[np], path[np] # ha! p = np return
Ensure self.location is on path If replace=False (default): - If location is already in path anywhere, do nothing. - Else: - If it's an egg and its parent directory is on path, insert just ahead of the parent. - Else: add to the end of path. If replace=True: - If location is already on path anywhere (not eggs) or higher priority than its parent (eggs) do nothing. - Else: - If it's an egg and its parent directory is on path, insert just ahead of the parent, removing any lower-priority entries. - Else: add it to the front of path.
100
154
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def insert_on(self, path, loc=None, replace=False): loc = loc or self.location if not loc: return nloc = _normalize_cached(loc) bdir = os.path.dirname(nloc) npath = [(p and _normalize_cached(p) or p) for p in path] for p, item in enumerate(npath): if item == nloc: if replace: break else: # don't modify path (even removing duplicates) if # found and not replace return elif item == bdir and self.precedence == EGG_DIST: # if it's an .egg, give it precedence over its directory # UNLESS it's already been added to sys.path and replace=False if (not replace) and nloc in npath[p:]: return if path is sys.path: self.check_version_conflict() path.insert(p, loc) npath.insert(p, nloc) break else: if path is sys.path: self.check_version_conflict() if replace: path.insert(0, loc) else: path.append(loc) return # p is the spot where we found or inserted loc; now remove duplicates while True: try: np = npath.index(nloc, p + 1) except ValueError: break else: del npath[np], path[np] # ha! p = np return ``` ###Assistant : Ensure self.location is on path If replace=False (default): - If location is already in path anywhere, do nothing. - Else: - If it's an egg and its parent directory is on path, insert just ahead of the parent. - Else: add to the end of path. If replace=True: - If location is already on path anywhere (not eggs) or higher priority than its parent (eggs) do nothing. - Else: - If it's an egg and its parent directory is on path, insert just ahead of the parent, removing any lower-priority entries. - Else: add it to the front of path.
2,659
def serving_output(self, output): pkv = tf.convert_to_tensor(output.past_key_values) if self.config.use_cache else None hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None cross_attns = ( tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions and self.config.add_cross_attention else None ) return TFBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=output.hidden_states, past_key_values=pkv, hidden_states=hs, attentions=attns, cross_attentions=cross_attns, ) @add_start_docstrings( , XGLM_START_DOCSTRING, )
The XGLM Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).
20
47
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def serving_output(self, output): pkv = tf.convert_to_tensor(output.past_key_values) if self.config.use_cache else None hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None cross_attns = ( tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions and self.config.add_cross_attention else None ) return TFBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=output.hidden_states, past_key_values=pkv, hidden_states=hs, attentions=attns, cross_attentions=cross_attns, ) @add_start_docstrings( , XGLM_START_DOCSTRING, ) ``` ###Assistant : The XGLM Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).
2,660
def _apply_func_to_list_of_partitions(cls, func, partitions, **kwargs): preprocessed_map_func = cls.preprocess_func(func) key_futures = RayWrapper.materialize( [ partition.apply(preprocessed_map_func, **kwargs) for partition in partitions ] ) gpu_managers = [partition.get_gpu_manager() for partition in partitions] return cls._create_partitions(key_futures, gpu_managers)
Apply `func` to a list of remote partitions from `partitions`. Parameters ---------- func : callable The function to apply. partitions : np.ndarray NumPy array with partitions. **kwargs : dict Additional keyword arguments to be passed to `func`. Returns ------- np.ndarray A NumPy array of ``cuDFOnRayDataframePartition`` objects. Notes ----- This preprocesses `func` before applying it to the partitions.
59
30
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _apply_func_to_list_of_partitions(cls, func, partitions, **kwargs): preprocessed_map_func = cls.preprocess_func(func) key_futures = RayWrapper.materialize( [ partition.apply(preprocessed_map_func, **kwargs) for partition in partitions ] ) gpu_managers = [partition.get_gpu_manager() for partition in partitions] return cls._create_partitions(key_futures, gpu_managers) ``` ###Assistant : Apply `func` to a list of remote partitions from `partitions`. Parameters ---------- func : callable The function to apply. partitions : np.ndarray NumPy array with partitions. **kwargs : dict Additional keyword arguments to be passed to `func`. Returns ------- np.ndarray A NumPy array of ``cuDFOnRayDataframePartition`` objects. Notes ----- This preprocesses `func` before applying it to the partitions.
2,661
def _execute_impl(self, *args, **kwargs) -> ObjectRef: return self._deployment_function_handle.remote( *self._bound_args, **self._bound_kwargs )
Executor of a DeploymentNode, called each time on dag.execute. The execute implementation is recursive, that is, the method nodes will receive whatever this method returns. We return a handle here so the method node can call upon it directly.
37
11
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _execute_impl(self, *args, **kwargs) -> ObjectRef: return self._deployment_function_handle.remote( *self._bound_args, **self._bound_kwargs ) ``` ###Assistant : Executor of a DeploymentNode, called each time on dag.execute. The execute implementation is recursive, that is, the method nodes will receive whatever this method returns. We return a handle here so the method node can call upon it directly.
2,662
def save_img(path, x, data_format=None, file_format=None, scale=True, **kwargs): if data_format is None: data_format = backend.image_data_format() img = array_to_img(x, data_format=data_format, scale=scale) if img.mode == 'RGBA' and (file_format == 'jpg' or file_format == 'jpeg'): warnings.warn('The JPG format does not support ' 'RGBA images, converting to RGB.') img = img.convert('RGB') img.save(path, format=file_format, **kwargs) @keras_export('keras.utils.load_img', 'keras.preprocessing.image.load_img')
Saves an image stored as a Numpy array to a path or file object. Args: path: Path or file object. x: Numpy array. data_format: Image data format, either "channels_first" or "channels_last". file_format: Optional file format override. If omitted, the format to use is determined from the filename extension. If a file object was used instead of a filename, this parameter should always be used. scale: Whether to rescale image values to be within `[0, 255]`. **kwargs: Additional keyword arguments passed to `PIL.Image.save()`.
82
51
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def save_img(path, x, data_format=None, file_format=None, scale=True, **kwargs): if data_format is None: data_format = backend.image_data_format() img = array_to_img(x, data_format=data_format, scale=scale) if img.mode == 'RGBA' and (file_format == 'jpg' or file_format == 'jpeg'): warnings.warn('The JPG format does not support ' 'RGBA images, converting to RGB.') img = img.convert('RGB') img.save(path, format=file_format, **kwargs) @keras_export('keras.utils.load_img', 'keras.preprocessing.image.load_img') ``` ###Assistant : Saves an image stored as a Numpy array to a path or file object. Args: path: Path or file object. x: Numpy array. data_format: Image data format, either "channels_first" or "channels_last". file_format: Optional file format override. If omitted, the format to use is determined from the filename extension. If a file object was used instead of a filename, this parameter should always be used. scale: Whether to rescale image values to be within `[0, 255]`. **kwargs: Additional keyword arguments passed to `PIL.Image.save()`.
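A hedged usage sketch, assuming a TensorFlow/Keras install and an arbitrary filename: the `@keras_export` decorator above means the function is reachable as `keras.utils.save_img` in recent releases.

```Python
import numpy as np
from tensorflow import keras

x = np.random.randint(0, 256, size=(4, 4, 3))
keras.utils.save_img("out.png", x)  # file format inferred from the .png extension
```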
2,663
def log_commenting_changes(self, changes, revision): for comment in changes["new_comments"]: comment.log_create(page_revision=revision, user=self.request.user) for comment in changes["edited_comments"]: comment.log_edit(page_revision=revision, user=self.request.user) for comment in changes["resolved_comments"]: comment.log_resolve(page_revision=revision, user=self.request.user) for comment in changes["deleted_comments"]: comment.log_delete(page_revision=revision, user=self.request.user) for comment, replies in changes["new_replies"]: for reply in replies: reply.log_create(page_revision=revision, user=self.request.user) for comment, replies in changes["edited_replies"]: for reply in replies: reply.log_edit(page_revision=revision, user=self.request.user) for comment, replies in changes["deleted_replies"]: for reply in replies: reply.log_delete(page_revision=revision, user=self.request.user)
Generates log entries for any changes made to comments or replies.
11
61
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def log_commenting_changes(self, changes, revision): for comment in changes["new_comments"]: comment.log_create(page_revision=revision, user=self.request.user) for comment in changes["edited_comments"]: comment.log_edit(page_revision=revision, user=self.request.user) for comment in changes["resolved_comments"]: comment.log_resolve(page_revision=revision, user=self.request.user) for comment in changes["deleted_comments"]: comment.log_delete(page_revision=revision, user=self.request.user) for comment, replies in changes["new_replies"]: for reply in replies: reply.log_create(page_revision=revision, user=self.request.user) for comment, replies in changes["edited_replies"]: for reply in replies: reply.log_edit(page_revision=revision, user=self.request.user) for comment, replies in changes["deleted_replies"]: for reply in replies: reply.log_delete(page_revision=revision, user=self.request.user) ``` ###Assistant : Generates log entries for any changes made to comments or replies.
2,664
def __new__(cls, stylename, **kwargs): # The "class" should have the _style_list attribute, which is a mapping # of style names to style classes. _list = stylename.replace(" ", "").split(",") _name = _list[0].lower() try: _cls = cls._style_list[_name] except KeyError as err: raise ValueError(f"Unknown style: {stylename}") from err try: _args_pair = [cs.split("=") for cs in _list[1:]] _args = {k: float(v) for k, v in _args_pair} except ValueError as err: raise ValueError(f"Incorrect style argument: {stylename}") from err return _cls(**{**_args, **kwargs})
Return the instance of the subclass with the given style name.
11
76
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def __new__(cls, stylename, **kwargs): # The "class" should have the _style_list attribute, which is a mapping # of style names to style classes. _list = stylename.replace(" ", "").split(",") _name = _list[0].lower() try: _cls = cls._style_list[_name] except KeyError as err: raise ValueError(f"Unknown style: {stylename}") from err try: _args_pair = [cs.split("=") for cs in _list[1:]] _args = {k: float(v) for k, v in _args_pair} except ValueError as err: raise ValueError(f"Incorrect style argument: {stylename}") from err return _cls(**{**_args, **kwargs}) ``` ###Assistant : Return the instance of the subclass with the given style name.
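The comma/equals parsing above can be run in isolation; the style string here is a Matplotlib-like example, and no class lookup is performed.

```Python
stylename = "round, pad=0.3, rounding_size=0.2"
parts = stylename.replace(" ", "").split(",")
name = parts[0].lower()
# Each remaining "key=value" token becomes a float-valued keyword argument.
args = {k: float(v) for k, v in (cs.split("=") for cs in parts[1:])}
print(name, args)  # round {'pad': 0.3, 'rounding_size': 0.2}
```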
2,665
def losses(self): collected_losses = [] for layer in self._flatten_layers(): # If any eager losses are present, we assume the model to be part of # an eager training loop (either a custom one or the one used when # `run_eagerly=True`) and so we always return just the eager losses. if layer._eager_losses: # Filter placeholder losses that may have been added by revived # layers. (see base_layer_utils for details). if ( layer._eager_losses[0] is not base_layer_utils.REVIVED_LOSS_PLACEHOLDER ): collected_losses.extend(layer._eager_losses) else: collected_losses.extend(layer._losses) for regularizer in layer._callable_losses: loss_tensor = regularizer() if loss_tensor is not None: collected_losses.append(loss_tensor) return collected_losses
List of losses added using the `add_loss()` API. Variable regularization tensors are created when this property is accessed, so it is eager safe: accessing `losses` under a `tf.GradientTape` will propagate gradients back to the corresponding variables. Examples: >>> class MyLayer(tf.keras.layers.Layer): ... def call(self, inputs): ... self.add_loss(tf.abs(tf.reduce_mean(inputs))) ... return inputs >>> l = MyLayer() >>> l(np.ones((10, 1))) >>> l.losses [1.0] >>> inputs = tf.keras.Input(shape=(10,)) >>> x = tf.keras.layers.Dense(10)(inputs) >>> outputs = tf.keras.layers.Dense(1)(x) >>> model = tf.keras.Model(inputs, outputs) >>> # Activity regularization. >>> len(model.losses) 0 >>> model.add_loss(tf.abs(tf.reduce_mean(x))) >>> len(model.losses) 1 >>> inputs = tf.keras.Input(shape=(10,)) >>> d = tf.keras.layers.Dense(10, kernel_initializer='ones') >>> x = d(inputs) >>> outputs = tf.keras.layers.Dense(1)(x) >>> model = tf.keras.Model(inputs, outputs) >>> # Weight regularization. >>> model.add_loss(lambda: tf.reduce_mean(d.kernel)) >>> model.losses [<tf.Tensor: shape=(), dtype=float32, numpy=1.0>] Returns: A list of tensors.
128
93
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def losses(self): collected_losses = [] for layer in self._flatten_layers(): # If any eager losses are present, we assume the model to be part of # an eager training loop (either a custom one or the one used when # `run_eagerly=True`) and so we always return just the eager losses. if layer._eager_losses: # Filter placeholder losses that may have been added by revived # layers. (see base_layer_utils for details). if ( layer._eager_losses[0] is not base_layer_utils.REVIVED_LOSS_PLACEHOLDER ): collected_losses.extend(layer._eager_losses) else: collected_losses.extend(layer._losses) for regularizer in layer._callable_losses: loss_tensor = regularizer() if loss_tensor is not None: collected_losses.append(loss_tensor) return collected_losses ``` ###Assistant : List of losses added using the `add_loss()` API. Variable regularization tensors are created when this property is accessed, so it is eager safe: accessing `losses` under a `tf.GradientTape` will propagate gradients back to the corresponding variables. Examples: >>> class MyLayer(tf.keras.layers.Layer): ... def call(self, inputs): ... self.add_loss(tf.abs(tf.reduce_mean(inputs))) ... return inputs >>> l = MyLayer() >>> l(np.ones((10, 1))) >>> l.losses [1.0] >>> inputs = tf.keras.Input(shape=(10,)) >>> x = tf.keras.layers.Dense(10)(inputs) >>> outputs = tf.keras.layers.Dense(1)(x) >>> model = tf.keras.Model(inputs, outputs) >>> # Activity regularization. >>> len(model.losses) 0 >>> model.add_loss(tf.abs(tf.reduce_mean(x))) >>> len(model.losses) 1 >>> inputs = tf.keras.Input(shape=(10,)) >>> d = tf.keras.layers.Dense(10, kernel_initializer='ones') >>> x = d(inputs) >>> outputs = tf.keras.layers.Dense(1)(x) >>> model = tf.keras.Model(inputs, outputs) >>> # Weight regularization. >>> model.add_loss(lambda: tf.reduce_mean(d.kernel)) >>> model.losses [<tf.Tensor: shape=(), dtype=float32, numpy=1.0>] Returns: A list of tensors.
2,666
def fit(self, X, y, Xy=None): self._validate_params() X, y = self._validate_data(X, y, y_numeric=True, multi_output=True) _normalize = _deprecate_normalize( self.normalize, default=True, estimator_name=self.__class__.__name__ ) alpha = getattr(self, "alpha", 0.0) if hasattr(self, "n_nonzero_coefs"): alpha = 0.0 # n_nonzero_coefs parametrization takes priority max_iter = self.n_nonzero_coefs else: max_iter = self.max_iter if self.jitter is not None: rng = check_random_state(self.random_state) noise = rng.uniform(high=self.jitter, size=len(y)) y = y + noise self._fit( X, y, max_iter=max_iter, alpha=alpha, fit_path=self.fit_path, normalize=_normalize, Xy=Xy, ) return self
Fit the model using X, y as training data. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data. y : array-like of shape (n_samples,) or (n_samples, n_targets) Target values. Xy : array-like of shape (n_samples,) or (n_samples, n_targets), \ default=None Xy = np.dot(X.T, y) that can be precomputed. It is useful only when the Gram matrix is precomputed. Returns ------- self : object Returns an instance of self.
70
71
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def fit(self, X, y, Xy=None): self._validate_params() X, y = self._validate_data(X, y, y_numeric=True, multi_output=True) _normalize = _deprecate_normalize( self.normalize, default=True, estimator_name=self.__class__.__name__ ) alpha = getattr(self, "alpha", 0.0) if hasattr(self, "n_nonzero_coefs"): alpha = 0.0 # n_nonzero_coefs parametrization takes priority max_iter = self.n_nonzero_coefs else: max_iter = self.max_iter if self.jitter is not None: rng = check_random_state(self.random_state) noise = rng.uniform(high=self.jitter, size=len(y)) y = y + noise self._fit( X, y, max_iter=max_iter, alpha=alpha, fit_path=self.fit_path, normalize=_normalize, Xy=Xy, ) return self ``` ###Assistant : Fit the model using X, y as training data. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data. y : array-like of shape (n_samples,) or (n_samples, n_targets) Target values. Xy : array-like of shape (n_samples,) or (n_samples, n_targets), \ default=None Xy = np.dot(X.T, y) that can be precomputed. It is useful only when the Gram matrix is precomputed. Returns ------- self : object Returns an instance of self.
2,667
def _decode_block_string(block_string): assert isinstance(block_string, str) ops = block_string.split('_') options = {} for op in ops: splits = re.split(r'(\d.*)', op) if len(splits) >= 2: key, value = splits[:2] options[key] = value # Check stride cond_1 = ('s' in options and len(options['s']) == 1) cond_2 = ((len(options['s']) == 2) and (options['s'][0] == options['s'][1])) assert (cond_1 or cond_2) return BlockArgs(kernel_size=int(options['k']), num_repeat=int(options['r']), input_filters=int(options['i']), output_filters=int(options['o']), expand_ratio=int(options['e']), id_skip=('noskip' not in block_string), se_ratio=float(options['se']) if 'se' in options else None, stride=[int(options['s'][0])])
Gets a block through a string notation of arguments.
9
73
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _decode_block_string(block_string): assert isinstance(block_string, str) ops = block_string.split('_') options = {} for op in ops: splits = re.split(r'(\d.*)', op) if len(splits) >= 2: key, value = splits[:2] options[key] = value # Check stride cond_1 = ('s' in options and len(options['s']) == 1) cond_2 = ((len(options['s']) == 2) and (options['s'][0] == options['s'][1])) assert (cond_1 or cond_2) return BlockArgs(kernel_size=int(options['k']), num_repeat=int(options['r']), input_filters=int(options['i']), output_filters=int(options['o']), expand_ratio=int(options['e']), id_skip=('noskip' not in block_string), se_ratio=float(options['se']) if 'se' in options else None, stride=[int(options['s'][0])]) ``` ###Assistant : Gets a block through a string notation of arguments.
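How the `re.split` above tokenizes one conventional EfficientNet block string (the string itself is illustrative):

```Python
import re

for op in "r1_k3_s11_e1_i32_o16_se0.25".split("_"):
    print(op, re.split(r"(\d.*)", op))
# 'k3' -> ['k', '3', ''] so key='k', value='3'; 'se0.25' -> ['se', '0.25', '']
```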
2,668
def copy(a, order='K', subok=False): return array(a, order=order, subok=subok, copy=True) # Basic operations
Return an array copy of the given object. Parameters ---------- a : array_like Input data. order : {'C', 'F', 'A', 'K'}, optional Controls the memory layout of the copy. 'C' means C-order, 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, 'C' otherwise. 'K' means match the layout of `a` as closely as possible. (Note that this function and :meth:`ndarray.copy` are very similar, but have different default values for their order= arguments.) subok : bool, optional If True, then sub-classes will be passed-through, otherwise the returned array will be forced to be a base-class array (defaults to False). .. versionadded:: 1.19.0 Returns ------- arr : ndarray Array interpretation of `a`. See Also -------- ndarray.copy : Preferred method for creating an array copy Notes ----- This is equivalent to: >>> np.array(a, copy=True) #doctest: +SKIP Examples -------- Create an array x, with a reference y and a copy z: >>> x = np.array([1, 2, 3]) >>> y = x >>> z = np.copy(x) Note that, when we modify x, y changes, but not z: >>> x[0] = 10 >>> x[0] == y[0] True >>> x[0] == z[0] False Note that, np.copy clears previously set WRITEABLE=False flag. >>> a = np.array([1, 2, 3]) >>> a.flags["WRITEABLE"] = False >>> b = np.copy(a) >>> b.flags["WRITEABLE"] True >>> b[0] = 3 >>> b array([3, 2, 3]) Note that np.copy is a shallow copy and will not copy object elements within arrays. This is mainly important for arrays containing Python objects. The new array will contain the same object which may lead to surprises if that object can be modified (is mutable): >>> a = np.array([1, 'm', [2, 3, 4]], dtype=object) >>> b = np.copy(a) >>> b[2][0] = 10 >>> a array([1, 'm', list([10, 3, 4])], dtype=object) To ensure all elements within an ``object`` array are copied, use `copy.deepcopy`: >>> import copy >>> a = np.array([1, 'm', [2, 3, 4]], dtype=object) >>> c = copy.deepcopy(a) >>> c[2][0] = 10 >>> c array([1, 'm', list([10, 3, 4])], dtype=object) >>> a array([1, 'm', list([2, 3, 4])], dtype=object)
340
12
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def copy(a, order='K', subok=False): return array(a, order=order, subok=subok, copy=True) # Basic operations ``` ###Assistant : Return an array copy of the given object. Parameters ---------- a : array_like Input data. order : {'C', 'F', 'A', 'K'}, optional Controls the memory layout of the copy. 'C' means C-order, 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, 'C' otherwise. 'K' means match the layout of `a` as closely as possible. (Note that this function and :meth:`ndarray.copy` are very similar, but have different default values for their order= arguments.) subok : bool, optional If True, then sub-classes will be passed-through, otherwise the returned array will be forced to be a base-class array (defaults to False). .. versionadded:: 1.19.0 Returns ------- arr : ndarray Array interpretation of `a`. See Also -------- ndarray.copy : Preferred method for creating an array copy Notes ----- This is equivalent to: >>> np.array(a, copy=True) #doctest: +SKIP Examples -------- Create an array x, with a reference y and a copy z: >>> x = np.array([1, 2, 3]) >>> y = x >>> z = np.copy(x) Note that, when we modify x, y changes, but not z: >>> x[0] = 10 >>> x[0] == y[0] True >>> x[0] == z[0] False Note that, np.copy clears previously set WRITEABLE=False flag. >>> a = np.array([1, 2, 3]) >>> a.flags["WRITEABLE"] = False >>> b = np.copy(a) >>> b.flags["WRITEABLE"] True >>> b[0] = 3 >>> b array([3, 2, 3]) Note that np.copy is a shallow copy and will not copy object elements within arrays. This is mainly important for arrays containing Python objects. The new array will contain the same object which may lead to surprises if that object can be modified (is mutable): >>> a = np.array([1, 'm', [2, 3, 4]], dtype=object) >>> b = np.copy(a) >>> b[2][0] = 10 >>> a array([1, 'm', list([10, 3, 4])], dtype=object) To ensure all elements within an ``object`` array are copied, use `copy.deepcopy`: >>> import copy >>> a = np.array([1, 'm', [2, 3, 4]], dtype=object) >>> c = copy.deepcopy(a) >>> c[2][0] = 10 >>> c array([1, 'm', list([10, 3, 4])], dtype=object) >>> a array([1, 'm', list([2, 3, 4])], dtype=object)
2,669
def test_context_placement_group(): driver_code = proc = run_string_as_driver_nonblocking(driver_code)
import ray from ray.data.context import DatasetContext from ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy from ray._private.test_utils import placement_group_assert_no_leak ray.init(num_cpus=1) context = DatasetContext.get_current() # This placement group will take up all cores of the local cluster. placement_group = ray.util.placement_group( name="core_hog", strategy="SPREAD", bundles=[ {"CPU": 1}, ], ) ray.get(placement_group.ready()) context.scheduling_strategy = PlacementGroupSchedulingStrategy(placement_group) pipe = ray.data.range(100, parallelism=2) \ .window(blocks_per_window=1) \ .map(lambda x: x + 1) assert pipe.take_all() == list(range(1, 101)) placement_group_assert_no_leak([placement_group]) ray.shutdown()
64
7
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_context_placement_group(): driver_code = proc = run_string_as_driver_nonblocking(driver_code) ``` ###Assistant : import ray from ray.data.context import DatasetContext from ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy from ray._private.test_utils import placement_group_assert_no_leak ray.init(num_cpus=1) context = DatasetContext.get_current() # This placement group will take up all cores of the local cluster. placement_group = ray.util.placement_group( name="core_hog", strategy="SPREAD", bundles=[ {"CPU": 1}, ], ) ray.get(placement_group.ready()) context.scheduling_strategy = PlacementGroupSchedulingStrategy(placement_group) pipe = ray.data.range(100, parallelism=2) \ .window(blocks_per_window=1) \ .map(lambda x: x + 1) assert pipe.take_all() == list(range(1, 101)) placement_group_assert_no_leak([placement_group]) ray.shutdown()
2,670
def reorder_categories(self, new_categories, ordered=None): if set(self.dtype.categories) != set(new_categories): raise ValueError( "items in new_categories are not the same as in old categories" ) return self.set_categories(new_categories, ordered=ordered)
Reorder categories as specified in new_categories. `new_categories` need to include all old categories and no new category items. Parameters ---------- new_categories : Index-like The categories in new order. ordered : bool, optional Whether or not the categorical is treated as a ordered categorical. If not given, do not change the ordered information. Returns ------- cat : Categorical Categorical with reordered categories. Raises ------ ValueError If the new categories do not contain all old category items or any new ones See Also -------- rename_categories : Rename categories. add_categories : Add new categories. remove_categories : Remove the specified categories. remove_unused_categories : Remove categories which are not used. set_categories : Set the categories to the specified ones.
114
25
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def reorder_categories(self, new_categories, ordered=None): if set(self.dtype.categories) != set(new_categories): raise ValueError( "items in new_categories are not the same as in old categories" ) return self.set_categories(new_categories, ordered=ordered) ``` ###Assistant : Reorder categories as specified in new_categories. `new_categories` need to include all old categories and no new category items. Parameters ---------- new_categories : Index-like The categories in new order. ordered : bool, optional Whether or not the categorical is treated as a ordered categorical. If not given, do not change the ordered information. Returns ------- cat : Categorical Categorical with reordered categories. Raises ------ ValueError If the new categories do not contain all old category items or any new ones See Also -------- rename_categories : Rename categories. add_categories : Add new categories. remove_categories : Remove the specified categories. remove_unused_categories : Remove categories which are not used. set_categories : Set the categories to the specified ones.
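A short pandas usage sketch of the behaviour documented above; note that passing a category set that differs from the existing one raises the ValueError shown in the code:

```Python
import pandas as pd

c = pd.Categorical(["b", "a", "c"], categories=["a", "b", "c"])
c2 = c.reorder_categories(["c", "b", "a"], ordered=True)
print(list(c2.categories))  # ['c', 'b', 'a'] -- same items, new order
print(c2.ordered)           # True
# c.reorder_categories(["a", "b"]) would raise ValueError: the item sets differ
```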
2,671
def test_get_member_list_no_permission_former_member_with_at_token(self): # create a room, invite the user and the user joins room_id = self.helper.create_room_as("@alice:red") self.helper.invite(room_id, "@alice:red", self.user_id) self.helper.join(room_id, self.user_id) # sync to get an at token channel = self.make_request("GET", "/sync") self.assertEqual(200, channel.code) sync_token = channel.json_body["next_batch"] # check that the user can see the member list to start with channel = self.make_request( "GET", "/rooms/%s/members?at=%s" % (room_id, sync_token) ) self.assertEqual(200, channel.code, msg=channel.result["body"]) # ban the user (Note: the user is actually allowed to see this event and # state so that they know they're banned!) self.helper.change_membership(room_id, "@alice:red", self.user_id, "ban") # invite a third user and let them join self.helper.invite(room_id, "@alice:red", "@bob:red") self.helper.join(room_id, "@bob:red") # now, with the original user, sync again to get a new at token channel = self.make_request("GET", "/sync") self.assertEqual(200, channel.code) sync_token = channel.json_body["next_batch"] # check the user can no longer see the updated member list channel = self.make_request( "GET", "/rooms/%s/members?at=%s" % (room_id, sync_token) ) self.assertEqual(403, channel.code, msg=channel.result["body"])
Tests that a former member of the room can not get the member list (in the case that they use an at token).
23
150
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_get_member_list_no_permission_former_member_with_at_token(self): # create a room, invite the user and the user joins room_id = self.helper.create_room_as("@alice:red") self.helper.invite(room_id, "@alice:red", self.user_id) self.helper.join(room_id, self.user_id) # sync to get an at token channel = self.make_request("GET", "/sync") self.assertEqual(200, channel.code) sync_token = channel.json_body["next_batch"] # check that the user can see the member list to start with channel = self.make_request( "GET", "/rooms/%s/members?at=%s" % (room_id, sync_token) ) self.assertEqual(200, channel.code, msg=channel.result["body"]) # ban the user (Note: the user is actually allowed to see this event and # state so that they know they're banned!) self.helper.change_membership(room_id, "@alice:red", self.user_id, "ban") # invite a third user and let them join self.helper.invite(room_id, "@alice:red", "@bob:red") self.helper.join(room_id, "@bob:red") # now, with the original user, sync again to get a new at token channel = self.make_request("GET", "/sync") self.assertEqual(200, channel.code) sync_token = channel.json_body["next_batch"] # check the user can no longer see the updated member list channel = self.make_request( "GET", "/rooms/%s/members?at=%s" % (room_id, sync_token) ) self.assertEqual(403, channel.code, msg=channel.result["body"]) ``` ###Assistant : Tests that a former member of the room can not get the member list (in the case that they use an at token).
2,672
def _keep_original_ws(s, tag_s): return ''.join( c if tag_c == " " and c.isspace() else tag_c for c, tag_c in zip(s, tag_s) )
Replace whitespace with the original whitespace characters in `s`
9
22
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _keep_original_ws(s, tag_s): return ''.join( c if tag_c == " " and c.isspace() else tag_c for c, tag_c in zip(s, tag_s) ) ``` ###Assistant : Replace whitespace with the original whitespace characters in `s`
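A small illustration of why the helper keeps the original whitespace; the marker string here is hypothetical, in the style of difflib's intraline change markers:

```Python
def _keep_original_ws(s, tag_s):
    return ''.join(
        c if tag_c == " " and c.isspace() else tag_c
        for c, tag_c in zip(s, tag_s)
    )

line    = "a\tbc"
markers = "  ^^"   # '^' flags changed characters, ' ' flags unchanged ones
# The tab from `line` survives in the marker row, so the carets stay visually
# aligned under 'b' and 'c' when tabs are expanded.
print(repr(_keep_original_ws(line, markers)))  # ' \t^^'
```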
2,673
def unregister_cmap(name): cmap = _colormaps.get(name, None) _colormaps.unregister(name) return cmap
Remove a colormap recognized by :func:`get_cmap`. You may not remove built-in colormaps. If the named colormap is not registered, returns with no error, raises if you try to de-register a default colormap. .. warning:: Colormap names are currently a shared namespace that may be used by multiple packages. Use `unregister_cmap` only if you know you have registered that name before. In particular, do not unregister just in case to clean the name before registering a new colormap. Parameters ---------- name : str The name of the colormap to be un-registered Returns ------- ColorMap or None If the colormap was registered, return it if not return `None` Raises ------ ValueError If you try to de-register a default built-in colormap.
118
9
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def unregister_cmap(name): cmap = _colormaps.get(name, None) _colormaps.unregister(name) return cmap ``` ###Assistant : Remove a colormap recognized by :func:`get_cmap`. You may not remove built-in colormaps. If the named colormap is not registered, returns with no error, raises if you try to de-register a default colormap. .. warning:: Colormap names are currently a shared namespace that may be used by multiple packages. Use `unregister_cmap` only if you know you have registered that name before. In particular, do not unregister just in case to clean the name before registering a new colormap. Parameters ---------- name : str The name of the colormap to be un-registered Returns ------- ColorMap or None If the colormap was registered, return it if not return `None` Raises ------ ValueError If you try to de-register a default built-in colormap.
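A hedged usage sketch, assuming the module-level `matplotlib.cm` wrappers (`register_cmap`/`unregister_cmap`) that expose this registry in matplotlib versions where they are still available:

```Python
from matplotlib import cm
from matplotlib.colors import ListedColormap

# Register a small custom map, then remove it again.
cm.register_cmap(cmap=ListedColormap(["navy", "gold"], name="navy_gold"))
removed = cm.unregister_cmap("navy_gold")
print(removed.name)                      # navy_gold -- the removed colormap is returned
print(cm.unregister_cmap("navy_gold"))   # None -- unregistering a missing name is a no-op
```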
2,674
def _get_extraction_protocol_with_magic_number(f) -> Optional[str]: magic_number = f.read(MAGIC_NUMBER_MAX_LENGTH) f.seek(0) for i in range(MAGIC_NUMBER_MAX_LENGTH): compression = MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL.get(magic_number[: MAGIC_NUMBER_MAX_LENGTH - i]) if compression is not None: # TODO(QL): raise an error for .tar.gz files as in _get_extraction_protocol return compression compression = MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL.get(magic_number[: MAGIC_NUMBER_MAX_LENGTH - i]) if compression is not None: raise NotImplementedError(f"Compression protocol '{compression}' not implemented.")
read the magic number from a file-like object and return the compression protocol
13
53
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_extraction_protocol_with_magic_number(f) -> Optional[str]: magic_number = f.read(MAGIC_NUMBER_MAX_LENGTH) f.seek(0) for i in range(MAGIC_NUMBER_MAX_LENGTH): compression = MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL.get(magic_number[: MAGIC_NUMBER_MAX_LENGTH - i]) if compression is not None: # TODO(QL): raise an error for .tar.gz files as in _get_extraction_protocol return compression compression = MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL.get(magic_number[: MAGIC_NUMBER_MAX_LENGTH - i]) if compression is not None: raise NotImplementedError(f"Compression protocol '{compression}' not implemented.") ``` ###Assistant : read the magic number from a file-like object and return the compression protocol
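The shrinking-prefix lookup above is easier to see with a small self-contained sketch; the magic-number table below is a hypothetical subset of the one the function consults:

```Python
import io

MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL = {
    b"\x1f\x8b": "gzip",        # gzip stream
    b"PK\x03\x04": "zip",       # zip local file header
    b"\xfd7zXZ\x00": "xz",      # xz stream header
}
MAGIC_NUMBER_MAX_LENGTH = max(len(m) for m in MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL)

def sniff(f):
    magic_number = f.read(MAGIC_NUMBER_MAX_LENGTH)
    f.seek(0)  # rewind so the caller can still read the stream from the start
    for i in range(MAGIC_NUMBER_MAX_LENGTH):
        protocol = MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL.get(
            magic_number[: MAGIC_NUMBER_MAX_LENGTH - i])
        if protocol is not None:
            return protocol

print(sniff(io.BytesIO(b"\x1f\x8b\x08rest-of-gzip-stream")))  # gzip
```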
2,675
def _get_veths(net_data): if isinstance(net_data, dict): net_data = list(net_data.items()) nics = salt.utils.odict.OrderedDict() current_nic = salt.utils.odict.OrderedDict() no_names = True for item in net_data: if item and isinstance(item, dict): item = list(item.items())[0] # skip LXC configuration comment lines, and play only with tuples conf elif isinstance(item, str): # deal with reflection of commented lxc configs sitem = item.strip() if sitem.startswith("#") or not sitem: continue elif "=" in item: item = tuple(a.strip() for a in item.split("=", 1)) if item[0] == "lxc.network.type": current_nic = salt.utils.odict.OrderedDict() if item[0] == "lxc.network.name": no_names = False nics[item[1].strip()] = current_nic current_nic[item[0].strip()] = item[1].strip() # if no ethernet card name has been collected, assume we collected # data for eth0 if no_names and current_nic: nics[DEFAULT_NIC] = current_nic return nics
Parse the nic setup inside lxc conf tuples back to a dictionary indexed by network interface
16
118
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_veths(net_data): if isinstance(net_data, dict): net_data = list(net_data.items()) nics = salt.utils.odict.OrderedDict() current_nic = salt.utils.odict.OrderedDict() no_names = True for item in net_data: if item and isinstance(item, dict): item = list(item.items())[0] # skip LXC configuration comment lines, and play only with tuples conf elif isinstance(item, str): # deal with reflection of commented lxc configs sitem = item.strip() if sitem.startswith("#") or not sitem: continue elif "=" in item: item = tuple(a.strip() for a in item.split("=", 1)) if item[0] == "lxc.network.type": current_nic = salt.utils.odict.OrderedDict() if item[0] == "lxc.network.name": no_names = False nics[item[1].strip()] = current_nic current_nic[item[0].strip()] = item[1].strip() # if no ethernet card name has been collected, assume we collected # data for eth0 if no_names and current_nic: nics[DEFAULT_NIC] = current_nic return nics ``` ###Assistant : Parse the nic setup inside lxc conf tuples back to a dictionary indexed by network interface
2,676
def start_stdout_logging() -> None: if '_stdout_' in _handlers: return handler = StreamHandler(sys.stdout) handler.setFormatter(_StdoutFormatter()) _handlers['_stdout_'] = handler _root_logger.addHandler(handler)
Register the stdout handler. This function should be invoked on importing nni. It is safe to call it multiple times.
20
17
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def start_stdout_logging() -> None: if '_stdout_' in _handlers: return handler = StreamHandler(sys.stdout) handler.setFormatter(_StdoutFormatter()) _handlers['_stdout_'] = handler _root_logger.addHandler(handler) ``` ###Assistant : Register the stdout handler. This function should be invoked on importing nni. It is safe to call it multiple times.
2,677
def get_staffing_plan_detail(designation, company, offer_date): detail = frappe.db.sql( , (designation, company, offer_date), as_dict=1, ) return frappe._dict(detail[0]) if (detail and detail[0].parent) else None @frappe.whitelist()
SELECT DISTINCT spd.parent, sp.from_date as from_date, sp.to_date as to_date, sp.name, sum(spd.vacancies) as vacancies, spd.designation FROM `tabStaffing Plan Detail` spd, `tabStaffing Plan` sp WHERE sp.docstatus=1 AND spd.designation=%s AND sp.company=%s AND spd.parent = sp.name AND %s between sp.from_date and sp.to_date
38
22
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_staffing_plan_detail(designation, company, offer_date): detail = frappe.db.sql( , (designation, company, offer_date), as_dict=1, ) return frappe._dict(detail[0]) if (detail and detail[0].parent) else None @frappe.whitelist() ``` ###Assistant : SELECT DISTINCT spd.parent, sp.from_date as from_date, sp.to_date as to_date, sp.name, sum(spd.vacancies) as vacancies, spd.designation FROM `tabStaffing Plan Detail` spd, `tabStaffing Plan` sp WHERE sp.docstatus=1 AND spd.designation=%s AND sp.company=%s AND spd.parent = sp.name AND %s between sp.from_date and sp.to_date
2,678
def test_display_name(self) -> None: evaluator = self._get_evaluator({"body": "foo bar baz"}) condition = { "kind": "contains_display_name", } # Blank names are skipped. self.assertFalse(evaluator.matches(condition, "@user:test", "")) # Check a display name that doesn't match. self.assertFalse(evaluator.matches(condition, "@user:test", "not found")) # Check a display name which matches. self.assertTrue(evaluator.matches(condition, "@user:test", "foo")) # A display name that matches, but not a full word does not result in a match. self.assertFalse(evaluator.matches(condition, "@user:test", "ba")) # A display name should not be interpreted as a regular expression. self.assertFalse(evaluator.matches(condition, "@user:test", "ba[rz]")) # A display name with spaces should work fine. self.assertTrue(evaluator.matches(condition, "@user:test", "foo bar"))
Check for a matching display name in the body of the event.
12
94
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_display_name(self) -> None: evaluator = self._get_evaluator({"body": "foo bar baz"}) condition = { "kind": "contains_display_name", } # Blank names are skipped. self.assertFalse(evaluator.matches(condition, "@user:test", "")) # Check a display name that doesn't match. self.assertFalse(evaluator.matches(condition, "@user:test", "not found")) # Check a display name which matches. self.assertTrue(evaluator.matches(condition, "@user:test", "foo")) # A display name that matches, but not a full word does not result in a match. self.assertFalse(evaluator.matches(condition, "@user:test", "ba")) # A display name should not be interpreted as a regular expression. self.assertFalse(evaluator.matches(condition, "@user:test", "ba[rz]")) # A display name with spaces should work fine. self.assertTrue(evaluator.matches(condition, "@user:test", "foo bar")) ``` ###Assistant : Check for a matching display name in the body of the event.
2,679
def test_readlink_not_a_link(file, source): with pytest.raises(SaltInvocationError) as exc: file.readlink(path=source) assert "A valid link was not specified" in exc.value.message
Test readlink where the path is not a link. Should throw a SaltInvocationError.
13
17
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_readlink_not_a_link(file, source): with pytest.raises(SaltInvocationError) as exc: file.readlink(path=source) assert "A valid link was not specified" in exc.value.message ``` ###Assistant : Test readlink where the path is not a link. Should throw a SaltInvocationError.
2,680
def get_periodic_data(entry, filters): periodic_data = {} for d in entry: period = get_period(d.posting_date, filters) bal_qty = 0 # if period against item does not exist yet, instantiate it # insert existing balance dict against period, and add/subtract to it if periodic_data.get(d.item_code) and not periodic_data.get(d.item_code).get(period): previous_balance = periodic_data[d.item_code]["balance"].copy() periodic_data[d.item_code][period] = previous_balance if d.voucher_type == "Stock Reconciliation": if periodic_data.get(d.item_code) and periodic_data.get(d.item_code).get("balance").get( d.warehouse ): bal_qty = periodic_data[d.item_code]["balance"][d.warehouse] qty_diff = d.qty_after_transaction - bal_qty else: qty_diff = d.actual_qty if filters["value_quantity"] == "Quantity": value = qty_diff else: value = d.stock_value_difference # period-warehouse wise balance periodic_data.setdefault(d.item_code, {}).setdefault("balance", {}).setdefault(d.warehouse, 0.0) periodic_data.setdefault(d.item_code, {}).setdefault(period, {}).setdefault(d.warehouse, 0.0) periodic_data[d.item_code]["balance"][d.warehouse] += value periodic_data[d.item_code][period][d.warehouse] = periodic_data[d.item_code]["balance"][ d.warehouse ] return periodic_data
Structured as: Item 1 - Balance (updated and carried forward): - Warehouse A : bal_qty/value - Warehouse B : bal_qty/value - Jun 2021 (sum of warehouse quantities used in report) - Warehouse A : bal_qty/value - Warehouse B : bal_qty/value - Jul 2021 (sum of warehouse quantities used in report) - Warehouse A : bal_qty/value - Warehouse B : bal_qty/value Item 2 - Balance (updated and carried forward): - Warehouse A : bal_qty/value - Warehouse B : bal_qty/value - Jun 2021 (sum of warehouse quantities used in report) - Warehouse A : bal_qty/value - Warehouse B : bal_qty/value - Jul 2021 (sum of warehouse quantities used in report) - Warehouse A : bal_qty/value - Warehouse B : bal_qty/value
118
106
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_periodic_data(entry, filters): periodic_data = {} for d in entry: period = get_period(d.posting_date, filters) bal_qty = 0 # if period against item does not exist yet, instantiate it # insert existing balance dict against period, and add/subtract to it if periodic_data.get(d.item_code) and not periodic_data.get(d.item_code).get(period): previous_balance = periodic_data[d.item_code]["balance"].copy() periodic_data[d.item_code][period] = previous_balance if d.voucher_type == "Stock Reconciliation": if periodic_data.get(d.item_code) and periodic_data.get(d.item_code).get("balance").get( d.warehouse ): bal_qty = periodic_data[d.item_code]["balance"][d.warehouse] qty_diff = d.qty_after_transaction - bal_qty else: qty_diff = d.actual_qty if filters["value_quantity"] == "Quantity": value = qty_diff else: value = d.stock_value_difference # period-warehouse wise balance periodic_data.setdefault(d.item_code, {}).setdefault("balance", {}).setdefault(d.warehouse, 0.0) periodic_data.setdefault(d.item_code, {}).setdefault(period, {}).setdefault(d.warehouse, 0.0) periodic_data[d.item_code]["balance"][d.warehouse] += value periodic_data[d.item_code][period][d.warehouse] = periodic_data[d.item_code]["balance"][ d.warehouse ] return periodic_data ``` ###Assistant : Structured as: Item 1 - Balance (updated and carried forward): - Warehouse A : bal_qty/value - Warehouse B : bal_qty/value - Jun 2021 (sum of warehouse quantities used in report) - Warehouse A : bal_qty/value - Warehouse B : bal_qty/value - Jul 2021 (sum of warehouse quantities used in report) - Warehouse A : bal_qty/value - Warehouse B : bal_qty/value Item 2 - Balance (updated and carried forward): - Warehouse A : bal_qty/value - Warehouse B : bal_qty/value - Jun 2021 (sum of warehouse quantities used in report) - Warehouse A : bal_qty/value - Warehouse B : bal_qty/value - Jul 2021 (sum of warehouse quantities used in report) - Warehouse A : bal_qty/value - Warehouse B : bal_qty/value
2,681
def shuffle(*arrays, random_state=None, n_samples=None): return resample( *arrays, replace=False, n_samples=n_samples, random_state=random_state )
Shuffle arrays or sparse matrices in a consistent way. This is a convenience alias to ``resample(*arrays, replace=False)`` to do random permutations of the collections. Parameters ---------- *arrays : sequence of indexable data-structures Indexable data-structures can be arrays, lists, dataframes or scipy sparse matrices with consistent first dimension. random_state : int, RandomState instance or None, default=None Determines random number generation for shuffling the data. Pass an int for reproducible results across multiple function calls. See :term:`Glossary <random_state>`. n_samples : int, default=None Number of samples to generate. If left to None this is automatically set to the first dimension of the arrays. It should not be larger than the length of arrays. Returns ------- shuffled_arrays : sequence of indexable data-structures Sequence of shuffled copies of the collections. The original arrays are not impacted. See Also -------- resample : Resample arrays or sparse matrices in a consistent way. Examples -------- It is possible to mix sparse and dense arrays in the same run:: >>> import numpy as np >>> X = np.array([[1., 0.], [2., 1.], [0., 0.]]) >>> y = np.array([0, 1, 2]) >>> from scipy.sparse import coo_matrix >>> X_sparse = coo_matrix(X) >>> from sklearn.utils import shuffle >>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0) >>> X array([[0., 0.], [2., 1.], [1., 0.]]) >>> X_sparse <3x2 sparse matrix of type '<... 'numpy.float64'>' with 3 stored elements in Compressed Sparse Row format> >>> X_sparse.toarray() array([[0., 0.], [2., 1.], [1., 0.]]) >>> y array([2, 1, 0]) >>> shuffle(y, n_samples=2, random_state=0) array([0, 1])
248
11
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def shuffle(*arrays, random_state=None, n_samples=None): return resample( *arrays, replace=False, n_samples=n_samples, random_state=random_state ) ``` ###Assistant : Shuffle arrays or sparse matrices in a consistent way. This is a convenience alias to ``resample(*arrays, replace=False)`` to do random permutations of the collections. Parameters ---------- *arrays : sequence of indexable data-structures Indexable data-structures can be arrays, lists, dataframes or scipy sparse matrices with consistent first dimension. random_state : int, RandomState instance or None, default=None Determines random number generation for shuffling the data. Pass an int for reproducible results across multiple function calls. See :term:`Glossary <random_state>`. n_samples : int, default=None Number of samples to generate. If left to None this is automatically set to the first dimension of the arrays. It should not be larger than the length of arrays. Returns ------- shuffled_arrays : sequence of indexable data-structures Sequence of shuffled copies of the collections. The original arrays are not impacted. See Also -------- resample : Resample arrays or sparse matrices in a consistent way. Examples -------- It is possible to mix sparse and dense arrays in the same run:: >>> import numpy as np >>> X = np.array([[1., 0.], [2., 1.], [0., 0.]]) >>> y = np.array([0, 1, 2]) >>> from scipy.sparse import coo_matrix >>> X_sparse = coo_matrix(X) >>> from sklearn.utils import shuffle >>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0) >>> X array([[0., 0.], [2., 1.], [1., 0.]]) >>> X_sparse <3x2 sparse matrix of type '<... 'numpy.float64'>' with 3 stored elements in Compressed Sparse Row format> >>> X_sparse.toarray() array([[0., 0.], [2., 1.], [1., 0.]]) >>> y array([2, 1, 0]) >>> shuffle(y, n_samples=2, random_state=0) array([0, 1])
2,682
def check_changes(self, args, results): # type: (SanityConfig, Results) -> None integration_targets = list(walk_integration_targets()) module_targets = list(walk_module_targets()) integration_targets_by_name = dict((target.name, target) for target in integration_targets) module_names_by_path = dict((target.path, target.module) for target in module_targets) disabled_targets = [] unstable_targets = [] unsupported_targets = [] for command in [command for command in args.metadata.change_description.focused_command_targets if 'integration' in command]: for target in args.metadata.change_description.focused_command_targets[command]: if self.DISABLED in integration_targets_by_name[target].aliases: disabled_targets.append(target) elif self.UNSTABLE in integration_targets_by_name[target].aliases: unstable_targets.append(target) elif self.UNSUPPORTED in integration_targets_by_name[target].aliases: unsupported_targets.append(target) untested_modules = [] for path in args.metadata.change_description.no_integration_paths: module = module_names_by_path.get(path) if module: untested_modules.append(module) comments = [ self.format_comment(self.TEMPLATE_DISABLED, disabled_targets), self.format_comment(self.TEMPLATE_UNSTABLE, unstable_targets), self.format_comment(self.TEMPLATE_UNSUPPORTED, unsupported_targets), self.format_comment(self.TEMPLATE_UNTESTED, untested_modules), ] comments = [comment for comment in comments if comment] labels = dict( needs_tests=bool(untested_modules), disabled_tests=bool(disabled_targets), unstable_tests=bool(unstable_targets), unsupported_tests=bool(unsupported_targets), ) results.comments += comments results.labels.update(labels)
Check changes and store results in the provided result dictionary.
10
118
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def check_changes(self, args, results): # type: (SanityConfig, Results) -> None integration_targets = list(walk_integration_targets()) module_targets = list(walk_module_targets()) integration_targets_by_name = dict((target.name, target) for target in integration_targets) module_names_by_path = dict((target.path, target.module) for target in module_targets) disabled_targets = [] unstable_targets = [] unsupported_targets = [] for command in [command for command in args.metadata.change_description.focused_command_targets if 'integration' in command]: for target in args.metadata.change_description.focused_command_targets[command]: if self.DISABLED in integration_targets_by_name[target].aliases: disabled_targets.append(target) elif self.UNSTABLE in integration_targets_by_name[target].aliases: unstable_targets.append(target) elif self.UNSUPPORTED in integration_targets_by_name[target].aliases: unsupported_targets.append(target) untested_modules = [] for path in args.metadata.change_description.no_integration_paths: module = module_names_by_path.get(path) if module: untested_modules.append(module) comments = [ self.format_comment(self.TEMPLATE_DISABLED, disabled_targets), self.format_comment(self.TEMPLATE_UNSTABLE, unstable_targets), self.format_comment(self.TEMPLATE_UNSUPPORTED, unsupported_targets), self.format_comment(self.TEMPLATE_UNTESTED, untested_modules), ] comments = [comment for comment in comments if comment] labels = dict( needs_tests=bool(untested_modules), disabled_tests=bool(disabled_targets), unstable_tests=bool(unstable_targets), unsupported_tests=bool(unsupported_targets), ) results.comments += comments results.labels.update(labels) ``` ###Assistant : Check changes and store results in the provided result dictionary.
2,683
def test_overlap_first(business_client, setup_before_upload, show_overlap_first): c = business_client config = dict( title='test_overlap_first', is_published=True, maximum_annotations=1, show_overlap_first=show_overlap_first, sampling="Uniform sampling", label_config= ) project = make_project(config, business_client.user) annotation_result = json.dumps([{ 'from_name': 'text_class', 'to_name': 'text', 'type': 'choices', 'value': {'choices': ['class_A']} }]) num_tasks = 1000 overlap_cohort_percentage = 1 # set up tasks overlap setup_after_upload = True if setup_before_upload: r = c.patch( f'/api/projects/{project.id}/', data=json.dumps({'maximum_annotations': 2, 'overlap_cohort_percentage': overlap_cohort_percentage}), content_type='application/json' ) assert r.status_code == 200 setup_after_upload = False # create tasks tasks = [] for i in range(num_tasks): tasks.append({'data': {'text': f'this is {str(i)}'}}) r = business_client.post( f'/api/projects/{project.id}/tasks/bulk/', data=json.dumps(tasks), content_type='application/json') assert r.status_code == 201 if setup_after_upload: r = c.patch( f'/api/projects/{project.id}/', data=json.dumps({'maximum_annotations': 2, 'overlap_cohort_percentage': overlap_cohort_percentage}), content_type='application/json' ) assert r.status_code == 200 expected_tasks_with_overlap = int(overlap_cohort_percentage / 100. * num_tasks) assert Task.objects.filter(Q(project_id=project.id) & Q(overlap__gt=1)).count() == expected_tasks_with_overlap
<View> <Text name="text" value="$text"></Text> <Choices name="text_class" choice="single"> <Choice value="class_A"></Choice> <Choice value="class_B"></Choice> </Choices> </View>
13
122
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_overlap_first(business_client, setup_before_upload, show_overlap_first): c = business_client config = dict( title='test_overlap_first', is_published=True, maximum_annotations=1, show_overlap_first=show_overlap_first, sampling="Uniform sampling", label_config= ) project = make_project(config, business_client.user) annotation_result = json.dumps([{ 'from_name': 'text_class', 'to_name': 'text', 'type': 'choices', 'value': {'choices': ['class_A']} }]) num_tasks = 1000 overlap_cohort_percentage = 1 # set up tasks overlap setup_after_upload = True if setup_before_upload: r = c.patch( f'/api/projects/{project.id}/', data=json.dumps({'maximum_annotations': 2, 'overlap_cohort_percentage': overlap_cohort_percentage}), content_type='application/json' ) assert r.status_code == 200 setup_after_upload = False # create tasks tasks = [] for i in range(num_tasks): tasks.append({'data': {'text': f'this is {str(i)}'}}) r = business_client.post( f'/api/projects/{project.id}/tasks/bulk/', data=json.dumps(tasks), content_type='application/json') assert r.status_code == 201 if setup_after_upload: r = c.patch( f'/api/projects/{project.id}/', data=json.dumps({'maximum_annotations': 2, 'overlap_cohort_percentage': overlap_cohort_percentage}), content_type='application/json' ) assert r.status_code == 200 expected_tasks_with_overlap = int(overlap_cohort_percentage / 100. * num_tasks) assert Task.objects.filter(Q(project_id=project.id) & Q(overlap__gt=1)).count() == expected_tasks_with_overlap ``` ###Assistant : <View> <Text name="text" value="$text"></Text> <Choices name="text_class" choice="single"> <Choice value="class_A"></Choice> <Choice value="class_B"></Choice> </Choices> </View>
2,684
def svd_flip(u, v, u_based_decision=True): if u_based_decision: # columns of u, rows of v max_abs_cols = np.argmax(np.abs(u), axis=0) signs = np.sign(u[max_abs_cols, range(u.shape[1])]) u *= signs v *= signs[:, np.newaxis] else: # rows of v, columns of u max_abs_rows = np.argmax(np.abs(v), axis=1) signs = np.sign(v[range(v.shape[0]), max_abs_rows]) u *= signs v *= signs[:, np.newaxis] return u, v
Sign correction to ensure deterministic output from SVD. Adjusts the columns of u and the rows of v such that the loadings in the columns in u that are largest in absolute value are always positive. Parameters ---------- u : ndarray Parameters u and v are the output of `linalg.svd` or :func:`~sklearn.utils.extmath.randomized_svd`, with matching inner dimensions so one can compute `np.dot(u * s, v)`. v : ndarray Parameters u and v are the output of `linalg.svd` or :func:`~sklearn.utils.extmath.randomized_svd`, with matching inner dimensions so one can compute `np.dot(u * s, v)`. The input v should really be called vt to be consistent with scipy's output. u_based_decision : bool, default=True If True, use the columns of u as the basis for sign flipping. Otherwise, use the rows of v. The choice of which variable to base the decision on is generally algorithm dependent. Returns ------- u_adjusted : ndarray Array u with adjusted columns and the same dimensions as u. v_adjusted : ndarray Array v with adjusted rows and the same dimensions as v.
171
54
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def svd_flip(u, v, u_based_decision=True): if u_based_decision: # columns of u, rows of v max_abs_cols = np.argmax(np.abs(u), axis=0) signs = np.sign(u[max_abs_cols, range(u.shape[1])]) u *= signs v *= signs[:, np.newaxis] else: # rows of v, columns of u max_abs_rows = np.argmax(np.abs(v), axis=1) signs = np.sign(v[range(v.shape[0]), max_abs_rows]) u *= signs v *= signs[:, np.newaxis] return u, v ``` ###Assistant : Sign correction to ensure deterministic output from SVD. Adjusts the columns of u and the rows of v such that the loadings in the columns in u that are largest in absolute value are always positive. Parameters ---------- u : ndarray Parameters u and v are the output of `linalg.svd` or :func:`~sklearn.utils.extmath.randomized_svd`, with matching inner dimensions so one can compute `np.dot(u * s, v)`. v : ndarray Parameters u and v are the output of `linalg.svd` or :func:`~sklearn.utils.extmath.randomized_svd`, with matching inner dimensions so one can compute `np.dot(u * s, v)`. The input v should really be called vt to be consistent with scipy's output. u_based_decision : bool, default=True If True, use the columns of u as the basis for sign flipping. Otherwise, use the rows of v. The choice of which variable to base the decision on is generally algorithm dependent. Returns ------- u_adjusted : ndarray Array u with adjusted columns and the same dimensions as u. v_adjusted : ndarray Array v with adjusted rows and the same dimensions as v.
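A brief demonstration of the determinism this gives, using NumPy's SVD (a usage sketch, not from the original source):

```Python
import numpy as np
from sklearn.utils.extmath import svd_flip

rng = np.random.RandomState(0)
A = rng.randn(4, 3)
u, s, vt = np.linalg.svd(A, full_matrices=False)

# (u, vt) and (-u, -vt) are equally valid SVD factors of A;
# svd_flip maps both to the same canonical sign choice.
u1, vt1 = svd_flip(u.copy(), vt.copy())
u2, vt2 = svd_flip(-u, -vt)
assert np.allclose(u1, u2) and np.allclose(vt1, vt2)
assert np.allclose(u1 @ np.diag(s) @ vt1, A)   # reconstruction is unchanged
```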
2,685
def extra_action_out_fn(self) -> Dict[str, TensorType]: extra_fetches = {} # Action-logp and action-prob. if self._sampled_action_logp is not None: extra_fetches[SampleBatch.ACTION_PROB] = self._sampled_action_prob extra_fetches[SampleBatch.ACTION_LOGP] = self._sampled_action_logp # Action-dist inputs. if self._dist_inputs is not None: extra_fetches[SampleBatch.ACTION_DIST_INPUTS] = self._dist_inputs return extra_fetches
Extra values to fetch and return from compute_actions(). By default we return action probability/log-likelihood info and action distribution inputs (if present). Returns: Dict[str, TensorType]: An extra fetch-dict to be passed to and returned from the compute_actions() call.
37
36
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def extra_action_out_fn(self) -> Dict[str, TensorType]: extra_fetches = {} # Action-logp and action-prob. if self._sampled_action_logp is not None: extra_fetches[SampleBatch.ACTION_PROB] = self._sampled_action_prob extra_fetches[SampleBatch.ACTION_LOGP] = self._sampled_action_logp # Action-dist inputs. if self._dist_inputs is not None: extra_fetches[SampleBatch.ACTION_DIST_INPUTS] = self._dist_inputs return extra_fetches ``` ###Assistant : Extra values to fetch and return from compute_actions(). By default we return action probability/log-likelihood info and action distribution inputs (if present). Returns: Dict[str, TensorType]: An extra fetch-dict to be passed to and returned from the compute_actions() call.
2,686
async def cleanup_finished_triggers(self): for trigger_id, details in list(self.triggers.items()): if details["task"].done(): # Check to see if it exited for good reasons saved_exc = None try: result = details["task"].result() except (asyncio.CancelledError, SystemExit, KeyboardInterrupt): # These are "expected" exceptions and we stop processing here # If we don't, then the system requesting a trigger be removed - # which turns into CancelledError - results in a failure. del self.triggers[trigger_id] continue except BaseException as e: # This is potentially bad, so log it. self.log.exception("Trigger %s exited with error %s", details["name"], e) saved_exc = e else: # See if they foolishly returned a TriggerEvent if isinstance(result, TriggerEvent): self.log.error( "Trigger %s returned a TriggerEvent rather than yielding it", details["name"] ) # See if this exited without sending an event, in which case # any task instances depending on it need to be failed if details["events"] == 0: self.log.error( "Trigger %s exited without sending an event. Dependent tasks will be failed.", details["name"], ) self.failed_triggers.append((trigger_id, saved_exc)) del self.triggers[trigger_id] await asyncio.sleep(0)
Go through all trigger tasks (coroutines) and clean up entries for ones that have exited, optionally warning users if the exit was not normal.
24
162
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def cleanup_finished_triggers(self): for trigger_id, details in list(self.triggers.items()): if details["task"].done(): # Check to see if it exited for good reasons saved_exc = None try: result = details["task"].result() except (asyncio.CancelledError, SystemExit, KeyboardInterrupt): # These are "expected" exceptions and we stop processing here # If we don't, then the system requesting a trigger be removed - # which turns into CancelledError - results in a failure. del self.triggers[trigger_id] continue except BaseException as e: # This is potentially bad, so log it. self.log.exception("Trigger %s exited with error %s", details["name"], e) saved_exc = e else: # See if they foolishly returned a TriggerEvent if isinstance(result, TriggerEvent): self.log.error( "Trigger %s returned a TriggerEvent rather than yielding it", details["name"] ) # See if this exited without sending an event, in which case # any task instances depending on it need to be failed if details["events"] == 0: self.log.error( "Trigger %s exited without sending an event. Dependent tasks will be failed.", details["name"], ) self.failed_triggers.append((trigger_id, saved_exc)) del self.triggers[trigger_id] await asyncio.sleep(0) ``` ###Assistant : Go through all trigger tasks (coroutines) and clean up entries for ones that have exited, optionally warning users if the exit was not normal.
2,687
def get_global_travel_time(self): gtt = 0 for entity in self.tripinfo: gtt += self.get_duration(entity, default=0.0) for entity in self.personinfo: gtt += self.get_duration(entity, default=0.0) return gtt ########################################################################### # ROUTING
Returns the global travel time computed from SUMO tripinfo data. The function process_tripinfo_file() needs to be called in advance to initialize the data structures required.
25
26
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_global_travel_time(self): gtt = 0 for entity in self.tripinfo: gtt += self.get_duration(entity, default=0.0) for entity in self.personinfo: gtt += self.get_duration(entity, default=0.0) return gtt ########################################################################### # ROUTING ``` ###Assistant : Returns the global travel time computed from SUMO tripinfo data. The function process_tripinfo_file() needs to be called in advance to initialize the data structures required.
2,688
def post(self, url, data=None, json=None, **kwargs): return self.request("POST", url, data=data, json=json, **kwargs)
Sends a POST request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary, list of tuples, bytes, or file-like object to send in the body of the :class:`Request`. :param json: (optional) json to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response
55
13
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def post(self, url, data=None, json=None, **kwargs): return self.request("POST", url, data=data, json=json, **kwargs) ``` ###Assistant : Sends a POST request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary, list of tuples, bytes, or file-like object to send in the body of the :class:`Request`. :param json: (optional) json to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response
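Typical call patterns for the two body parameters documented above (httpbin.org is used here purely as an illustrative echo service):

```Python
import requests

with requests.Session() as s:
    # form-encoded body via `data`
    r1 = s.post("https://httpbin.org/post", data={"user": "alice"})
    # JSON body via `json` -- also sets the Content-Type header automatically
    r2 = s.post("https://httpbin.org/post", json={"user": "alice"})
    print(r1.status_code, r2.json()["json"])   # 200 {'user': 'alice'}
```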
2,689
def __getitem__(self, idx): # type: (int) -> HPackHdrEntry assert idx >= 0 if idx > type(self)._static_entries_last_idx: idx -= type(self)._static_entries_last_idx + 1 if idx >= len(self._dynamic_table): raise KeyError( 'EINVAL: idx: out-of-bound read: {}; maximum index: {}'.format(idx, len(self._dynamic_table)) # noqa: E501 ) return self._dynamic_table[idx] return type(self)._static_entries[idx]
Gets an element from the header tables (static or dynamic indifferently) :param int idx: the index number of the entry to retrieve. If the index value is greater than the last index of the static entry table, then a dynamic entry is requested, following the procedure described in RFC 7541 par. 2.3.3 :return: HPackHdrEntry: the entry defined at this requested index. If the entry does not exist, KeyError is raised :raises: KeyError, AssertionError
76
44
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def __getitem__(self, idx): # type: (int) -> HPackHdrEntry assert idx >= 0 if idx > type(self)._static_entries_last_idx: idx -= type(self)._static_entries_last_idx + 1 if idx >= len(self._dynamic_table): raise KeyError( 'EINVAL: idx: out-of-bound read: {}; maximum index: {}'.format(idx, len(self._dynamic_table)) # noqa: E501 ) return self._dynamic_table[idx] return type(self)._static_entries[idx] ``` ###Assistant : Gets an element from the header tables (static or dynamic indifferently) :param int idx: the index number of the entry to retrieve. If the index value is greater than the last index of the static entry table, then a dynamic entry is requested, following the procedure described in RFC 7541 par. 2.3.3 :return: HPackHdrEntry: the entry defined at this requested index. If the entry does not exist, KeyError is raised :raises: KeyError, AssertionError
2,690
def test_pagination(self): parent = Parent.objects.create(name="anything") for i in range(1, 31): Child.objects.create(name="name %s" % i, parent=parent) Child.objects.create(name="filtered %s" % i, parent=parent) request = self.factory.get("/child/") request.user = self.superuser # Test default queryset m = ChildAdmin(Child, custom_site) cl = m.get_changelist_instance(request) self.assertEqual(cl.queryset.count(), 60) self.assertEqual(cl.paginator.count, 60) self.assertEqual(list(cl.paginator.page_range), [1, 2, 3, 4, 5, 6]) # Test custom queryset m = FilteredChildAdmin(Child, custom_site) cl = m.get_changelist_instance(request) self.assertEqual(cl.queryset.count(), 30) self.assertEqual(cl.paginator.count, 30) self.assertEqual(list(cl.paginator.page_range), [1, 2, 3])
Regression tests for #12893: Pagination in the admin's changelist doesn't use the queryset set by the ModelAdmin.
14
67
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_pagination(self): parent = Parent.objects.create(name="anything") for i in range(1, 31): Child.objects.create(name="name %s" % i, parent=parent) Child.objects.create(name="filtered %s" % i, parent=parent) request = self.factory.get("/child/") request.user = self.superuser # Test default queryset m = ChildAdmin(Child, custom_site) cl = m.get_changelist_instance(request) self.assertEqual(cl.queryset.count(), 60) self.assertEqual(cl.paginator.count, 60) self.assertEqual(list(cl.paginator.page_range), [1, 2, 3, 4, 5, 6]) # Test custom queryset m = FilteredChildAdmin(Child, custom_site) cl = m.get_changelist_instance(request) self.assertEqual(cl.queryset.count(), 30) self.assertEqual(cl.paginator.count, 30) self.assertEqual(list(cl.paginator.page_range), [1, 2, 3]) ``` ###Assistant : Regression tests for #12893: Pagination in the admin's changelist doesn't use the queryset set by the ModelAdmin.
2,691
def transform(self, X): if self.solver == "lsqr": raise NotImplementedError( "transform not implemented for 'lsqr' solver (use 'svd' or 'eigen')." ) check_is_fitted(self) X = self._validate_data(X, reset=False) if self.solver == "svd": X_new = np.dot(X - self.xbar_, self.scalings_) elif self.solver == "eigen": X_new = np.dot(X, self.scalings_) return X_new[:, : self._max_components]
Project data to maximize class separation. Parameters ---------- X : array-like of shape (n_samples, n_features) Input data. Returns ------- X_new : ndarray of shape (n_samples, n_components) or \ (n_samples, min(rank, n_components)) Transformed data. In the case of the 'svd' solver, the shape is (n_samples, min(rank, n_components)).
46
47
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def transform(self, X): if self.solver == "lsqr": raise NotImplementedError( "transform not implemented for 'lsqr' solver (use 'svd' or 'eigen')." ) check_is_fitted(self) X = self._validate_data(X, reset=False) if self.solver == "svd": X_new = np.dot(X - self.xbar_, self.scalings_) elif self.solver == "eigen": X_new = np.dot(X, self.scalings_) return X_new[:, : self._max_components] ``` ###Assistant : Project data to maximize class separation. Parameters ---------- X : array-like of shape (n_samples, n_features) Input data. Returns ------- X_new : ndarray of shape (n_samples, n_components) or \ (n_samples, min(rank, n_components)) Transformed data. In the case of the 'svd' solver, the shape is (n_samples, min(rank, n_components)).
2,692
async def test_heater_cooler_hvac_mode_vs_hvac_action(hass, utcnow): helper = await setup_test_component(hass, create_heater_cooler_service) # Simulate that current temperature is above target temp # Heating might be on, but hvac_action currently 'off' await helper.async_update( ServicesTypes.HEATER_COOLER, { CharacteristicsTypes.TEMPERATURE_CURRENT: 22, CharacteristicsTypes.TEMPERATURE_HEATING_THRESHOLD: 21, CharacteristicsTypes.CURRENT_HEATER_COOLER_STATE: CurrentHeaterCoolerStateValues.IDLE, CharacteristicsTypes.TARGET_HEATER_COOLER_STATE: TargetHeaterCoolerStateValues.HEAT, CharacteristicsTypes.SWING_MODE: SwingModeValues.DISABLED, }, ) state = await helper.poll_and_get_state() assert state.state == "heat" assert state.attributes["hvac_action"] == "idle" # Simulate that current temperature is below target temp # Heating might be on and hvac_action currently 'heat' await helper.async_update( ServicesTypes.HEATER_COOLER, { CharacteristicsTypes.TEMPERATURE_CURRENT: 19, CharacteristicsTypes.TEMPERATURE_HEATING_THRESHOLD: 21, CharacteristicsTypes.CURRENT_HEATER_COOLER_STATE: CurrentHeaterCoolerStateValues.HEATING, CharacteristicsTypes.TARGET_HEATER_COOLER_STATE: TargetHeaterCoolerStateValues.HEAT, CharacteristicsTypes.SWING_MODE: SwingModeValues.DISABLED, }, ) state = await helper.poll_and_get_state() assert state.state == "heat" assert state.attributes["hvac_action"] == "heating"
Check that we haven't conflated hvac_mode and hvac_action.
8
101
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_heater_cooler_hvac_mode_vs_hvac_action(hass, utcnow): helper = await setup_test_component(hass, create_heater_cooler_service) # Simulate that current temperature is above target temp # Heating might be on, but hvac_action currently 'off' await helper.async_update( ServicesTypes.HEATER_COOLER, { CharacteristicsTypes.TEMPERATURE_CURRENT: 22, CharacteristicsTypes.TEMPERATURE_HEATING_THRESHOLD: 21, CharacteristicsTypes.CURRENT_HEATER_COOLER_STATE: CurrentHeaterCoolerStateValues.IDLE, CharacteristicsTypes.TARGET_HEATER_COOLER_STATE: TargetHeaterCoolerStateValues.HEAT, CharacteristicsTypes.SWING_MODE: SwingModeValues.DISABLED, }, ) state = await helper.poll_and_get_state() assert state.state == "heat" assert state.attributes["hvac_action"] == "idle" # Simulate that current temperature is below target temp # Heating might be on and hvac_action currently 'heat' await helper.async_update( ServicesTypes.HEATER_COOLER, { CharacteristicsTypes.TEMPERATURE_CURRENT: 19, CharacteristicsTypes.TEMPERATURE_HEATING_THRESHOLD: 21, CharacteristicsTypes.CURRENT_HEATER_COOLER_STATE: CurrentHeaterCoolerStateValues.HEATING, CharacteristicsTypes.TARGET_HEATER_COOLER_STATE: TargetHeaterCoolerStateValues.HEAT, CharacteristicsTypes.SWING_MODE: SwingModeValues.DISABLED, }, ) state = await helper.poll_and_get_state() assert state.state == "heat" assert state.attributes["hvac_action"] == "heating" ``` ###Assistant : Check that we haven't conflated hvac_mode and hvac_action.
2,693
def not_in_timeout(cls, last_triggered, timeout): return ( last_triggered is None or timeout is None or (time.time() - last_triggered > timeout) )
Checks whether the current error lies outside the timeout window after the last trigger (potential reset of the connection).
15
20
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def not_in_timeout(cls, last_triggered, timeout): return ( last_triggered is None or timeout is None or (time.time() - last_triggered > timeout) ) ``` ###Assistant : Checks whether the current error lies outside the timeout window after the last trigger (potential reset of the connection).
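A minimal sketch of the intended call pattern, with made-up state variables:

```Python
import time

def not_in_timeout(last_triggered, timeout):
    return (
        last_triggered is None
        or timeout is None
        or (time.time() - last_triggered > timeout)
    )

last_triggered = None   # hypothetical per-connection state
timeout = 30.0          # seconds to suppress repeated handling

if not_in_timeout(last_triggered, timeout):     # True on the first error
    last_triggered = time.time()                # remember when we last reacted
print(not_in_timeout(last_triggered, timeout))  # False until 30 s have elapsed
```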
2,694
def get_rescored_finished(self, n_best=None): # if we never actually finished, force one if not self.finished: self.outputs[-1][0] = self.eos self.finished.append( _HypothesisTail( timestep=len(self.outputs) - 1, hypid=0, score=self.all_scores[-1][0], tokenid=self.outputs[-1][0], token_score=self.token_scores[0, -1] if self.token_scores is not None else None, token_rank=self.token_ranks[0, -1] if self.token_ranks is not None else None, ) ) rescored_finished = [] for finished_item in self.finished: current_length = finished_item.timestep + 1 # these weights are from Google NMT paper length_penalty = math.pow((1 + current_length) / 6, self.length_penalty) rescored_finished.append( _HypothesisTail( timestep=finished_item.timestep, hypid=finished_item.hypid, score=finished_item.score / length_penalty, tokenid=finished_item.tokenid, token_score=finished_item.token_score, token_rank=finished_item.token_rank, ) ) # Note: beam size is almost always pretty small, so sorting is cheap enough srted = sorted(rescored_finished, key=attrgetter('score'), reverse=True) if n_best is not None: srted = srted[:n_best] n_best_list = [] for hyp in srted: hyp_data = self._get_hyp_from_finished(hyp) token_ids = self._get_pretty_hypothesis(hyp_data) token_metadata = ( self._get_pretty_token_metadata(hyp_data) if self.verbose else None ) n_best_list.append((token_ids, hyp.score, token_metadata)) # check that there is at least one finished candidate # and assert that each of them contains only one EOS assert ( len(n_best_list) >= 1 ), f'TreeSearch returned {len(n_best_list)} candidates, must be >= 1' for (pred, score, _) in n_best_list: assert (pred == self.eos).sum() == 1, ( f'TreeSearch returned a finalized hypo with multiple end tokens ' f'with score {score.item():.2f}' ) return n_best_list
Return finished hypotheses according to adjusted scores. Score adjustment is done according to the Google NMT paper, which penalizes long utterances. :param n_best: number of finalized hypotheses to return :return: list of (tokens, score, token_metadata) 3-tuples, in sorted order, where: - tokens is a tensor of token ids - score is the adjusted log probability of the entire utterance - token_metadata dictionary: token_logprobs -> a tensor of conditional log probabilities of tokens token_ranks -> a tensor of ranks of tokens in vocabulary, by probability, when sampled
86
201
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_rescored_finished(self, n_best=None): # if we never actually finished, force one if not self.finished: self.outputs[-1][0] = self.eos self.finished.append( _HypothesisTail( timestep=len(self.outputs) - 1, hypid=0, score=self.all_scores[-1][0], tokenid=self.outputs[-1][0], token_score=self.token_scores[0, -1] if self.token_scores is not None else None, token_rank=self.token_ranks[0, -1] if self.token_ranks is not None else None, ) ) rescored_finished = [] for finished_item in self.finished: current_length = finished_item.timestep + 1 # these weights are from Google NMT paper length_penalty = math.pow((1 + current_length) / 6, self.length_penalty) rescored_finished.append( _HypothesisTail( timestep=finished_item.timestep, hypid=finished_item.hypid, score=finished_item.score / length_penalty, tokenid=finished_item.tokenid, token_score=finished_item.token_score, token_rank=finished_item.token_rank, ) ) # Note: beam size is almost always pretty small, so sorting is cheap enough srted = sorted(rescored_finished, key=attrgetter('score'), reverse=True) if n_best is not None: srted = srted[:n_best] n_best_list = [] for hyp in srted: hyp_data = self._get_hyp_from_finished(hyp) token_ids = self._get_pretty_hypothesis(hyp_data) token_metadata = ( self._get_pretty_token_metadata(hyp_data) if self.verbose else None ) n_best_list.append((token_ids, hyp.score, token_metadata)) # check that there is at least one finished candidate # and assert that each of them contains only one EOS assert ( len(n_best_list) >= 1 ), f'TreeSearch returned {len(n_best_list)} candidates, must be >= 1' for (pred, score, _) in n_best_list: assert (pred == self.eos).sum() == 1, ( f'TreeSearch returned a finalized hypo with multiple end tokens ' f'with score {score.item():.2f}' ) return n_best_list ``` ###Assistant : Return finished hypotheses according to adjusted scores. Score adjustment is done according to the Google NMT paper, which penalizes long utterances. :param n_best: number of finalized hypotheses to return :return: list of (tokens, score, token_metadata) 3-tuples, in sorted order, where: - tokens is a tensor of token ids - score is the adjusted log probability of the entire utterance - token_metadata dictionary: token_logprobs -> a tensor of conditional log probabilities of tokens token_ranks -> a tensor of ranks of tokens in vocabulary, by probability, when sampled
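The rescoring step divides each raw score by a length penalty of the form used in the Google NMT paper; as implemented above it is ((1 + length) / 6) ** alpha. A standalone sketch of just that adjustment, with illustrative numbers:

```Python
import math

def length_penalty(length, alpha):
    # Same form as in get_rescored_finished: ((1 + length) / 6) ** alpha
    return math.pow((1 + length) / 6, alpha)

# Hypothetical {hypothesis length: summed log-probability}
raw_scores = {5: -4.0, 20: -9.0}
adjusted = {n: s / length_penalty(n, alpha=0.65) for n, s in raw_scores.items()}
print(adjusted)  # ~{5: -4.0, 20: -3.99}: the longer hypothesis becomes competitive
```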
2,695
def quantile(self, q=0.5, **kwargs): return self._downsample("quantile", q=q, **kwargs)
Return value at the given quantile. Parameters ---------- q : float or array-like, default 0.5 (50% quantile) Returns ------- DataFrame or Series Quantile of values within each group. See Also -------- Series.quantile Return a series, where the index is q and the values are the quantiles. DataFrame.quantile Return a DataFrame, where the columns are the columns of self, and the values are the quantiles. DataFrameGroupBy.quantile Return a DataFrame, where the columns are groupby columns, and the values are its quantiles.
80
8
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def quantile(self, q=0.5, **kwargs): return self._downsample("quantile", q=q, **kwargs) ``` ###Assistant : Return value at the given quantile. Parameters ---------- q : float or array-like, default 0.5 (50% quantile) Returns ------- DataFrame or Series Quantile of values within each group. See Also -------- Series.quantile Return a series, where the index is q and the values are the quantiles. DataFrame.quantile Return a DataFrame, where the columns are the columns of self, and the values are the quantiles. DataFrameGroupBy.quantile Return a DataFrame, where the columns are groupby columns, and the values are its quantiles.
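A brief usage sketch with a resampled Series (the data is illustrative):

```Python
import pandas as pd

idx = pd.date_range("2024-01-01", periods=6, freq="h")
s = pd.Series([1, 3, 5, 2, 4, 6], index=idx)

# Median (the default q=0.5) of each 3-hour bin
print(s.resample("3h").quantile())
# A higher quantile per bin
print(s.resample("3h").quantile(0.9))
```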
2,696
def current_columns(self): return copy.deepcopy(self.custcols) #deepcopy to prevent users from changing it
Return the currently defined custom columns Return the currently defined custom columns including the ones that haven't yet been created. It is a dict of dicts defined as follows: custcols[lookup_name] = { 'label': lookup_name, 'name': column_heading, 'datatype': datatype, 'display': display, 'normalized': None, 'colnum': an integer used internally, 'is_multiple': is_multiple, } Columns that already exist will have additional attributes that this class doesn't use. See calibre.library.field_metadata.add_custom_field() for the complete list.
69
11
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def current_columns(self): return copy.deepcopy(self.custcols) #deepcopy to prevent users from changing it ``` ###Assistant : Return the currently defined custom columns Return the currently defined custom columns including the ones that haven't yet been created. It is a dict of dicts defined as follows: custcols[lookup_name] = { 'label': lookup_name, 'name': column_heading, 'datatype': datatype, 'display': display, 'normalized': None, 'colnum': an integer used internally, 'is_multiple': is_multiple, } Columns that already exist will have additional attributes that this class doesn't use. See calibre.library.field_metadata.add_custom_field() for the complete list.
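The deepcopy is a defensive-copy pattern: callers receive a snapshot they may mutate freely without corrupting internal state. A minimal standalone illustration (the class here is hypothetical, not calibre code):

```Python
import copy

class ColumnStore:
    def __init__(self):
        self.custcols = {"#genre": {"label": "#genre", "datatype": "text"}}

    def current_columns(self):
        # Deep copy so callers cannot mutate self.custcols through the return value
        return copy.deepcopy(self.custcols)

store = ColumnStore()
cols = store.current_columns()
cols["#genre"]["datatype"] = "int"                      # mutate the copy...
assert store.custcols["#genre"]["datatype"] == "text"   # ...the original is intact
```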
2,697
def transform_vector(self, vector): return Vector( (vector.x + self.offset[0]) * self.scale[0], (vector.y + self.offset[1]) * self.scale[1], )
Transforms the given vector into the coordinate space of the final image. Use this to find out where a point on the source image would end up in the final image after cropping/resizing has been performed. Returns a new vector.
40
16
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def transform_vector(self, vector): return Vector( (vector.x + self.offset[0]) * self.scale[0], (vector.y + self.offset[1]) * self.scale[1], ) ``` ###Assistant : Transforms the given vector into the coordinate space of the final image. Use this to find out where a point on the source image would end up in the final image after cropping/resizing has been performed. Returns a new vector.
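A self-contained sketch of the offset-then-scale transform with a minimal Vector stand-in (the real Vector type is assumed from the surrounding image library, so this namedtuple is a hypothetical substitute):

```Python
from collections import namedtuple

Vector = namedtuple("Vector", ["x", "y"])

class Transform:
    def __init__(self, offset, scale):
        self.offset = offset  # (dx, dy) applied before scaling
        self.scale = scale    # (sx, sy)

    def transform_vector(self, vector):
        return Vector(
            (vector.x + self.offset[0]) * self.scale[0],
            (vector.y + self.offset[1]) * self.scale[1],
        )

# Crop 100px off the left and top, then shrink the result by half
t = Transform(offset=(-100, -100), scale=(0.5, 0.5))
print(t.transform_vector(Vector(300, 500)))  # Vector(x=100.0, y=200.0)
```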
2,698
def _load_from_file(module_path): from imp import PY_SOURCE, load_module imported = None if module_path: with open(module_path, 'r') as openfile: imported = load_module("mod", openfile, module_path, ('imported', 'r', PY_SOURCE)) return imported
Load a Python module from its absolute filesystem path
9
27
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _load_from_file(module_path): from imp import PY_SOURCE, load_module imported = None if module_path: with open(module_path, 'r') as openfile: imported = load_module("mod", openfile, module_path, ('imported', 'r', PY_SOURCE)) return imported ``` ###Assistant : Load a Python module from its absolute filesystem path
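The imp module used above was deprecated in Python 3.4 and removed in 3.12; an equivalent sketch on current Pythons uses importlib.util:

```Python
import importlib.util

def load_from_file(module_path, module_name="mod"):
    # Load a Python module from an absolute filesystem path
    if not module_path:
        return None
    spec = importlib.util.spec_from_file_location(module_name, module_path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module
```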
2,699
def _alter_column_type_sql(self, table, old_field, new_field, new_type): if not hasattr(old_field, "dim") or not hasattr(new_field, "dim"): return super()._alter_column_type_sql(table, old_field, new_field, new_type) if old_field.dim == 2 and new_field.dim == 3: sql_alter = self.sql_alter_column_to_3d elif old_field.dim == 3 and new_field.dim == 2: sql_alter = self.sql_alter_column_to_2d else: sql_alter = self.sql_alter_column_type return ( ( sql_alter % { "column": self.quote_name(new_field.column), "type": new_type, }, [], ), [], )
Special case when dimension changed.
5
60
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _alter_column_type_sql(self, table, old_field, new_field, new_type): if not hasattr(old_field, "dim") or not hasattr(new_field, "dim"): return super()._alter_column_type_sql(table, old_field, new_field, new_type) if old_field.dim == 2 and new_field.dim == 3: sql_alter = self.sql_alter_column_to_3d elif old_field.dim == 3 and new_field.dim == 2: sql_alter = self.sql_alter_column_to_2d else: sql_alter = self.sql_alter_column_type return ( ( sql_alter % { "column": self.quote_name(new_field.column), "type": new_type, }, [], ), [], ) ``` ###Assistant : Special case when dimension changed.
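To see what the template selection amounts to, here is an illustrative rendering; the SQL string is an assumption in the spirit of a PostGIS schema editor, not copied from Django:

```Python
# Hypothetical template resembling sql_alter_column_to_3d
sql_alter_column_to_3d = (
    "ALTER COLUMN %(column)s TYPE %(type)s"
    " USING ST_Force3D(%(column)s)::%(type)s"
)

params = {"column": '"geom"', "type": "geometry(PointZ, 4326)"}
print(sql_alter_column_to_3d % params)
# ALTER COLUMN "geom" TYPE geometry(PointZ, 4326) USING ST_Force3D("geom")::geometry(PointZ, 4326)
```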