Dataset column overview (name, dtype, value range):
  Unnamed: 0  int64          0 to 2.93k
  code        stringlengths  101 to 62.2k
  docs        stringlengths  51 to 10.7k
  doc_len     int64          4 to 1.74k
  words       int64          4 to 4.82k
  lang        stringclasses  1 value
  prompt      stringlengths  320 to 71.2k
800
def is_empty_indexer(indexer) -> bool: if is_list_like(indexer) and not len(indexer): return True if not isinstance(indexer, tuple): indexer = (indexer,) return any(isinstance(idx, np.ndarray) and len(idx) == 0 for idx in indexer) # ----------------------------------------------------------- # Indexer Validation
Check if we have an empty indexer. Parameters ---------- indexer : object Returns ------- bool
15
34
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def is_empty_indexer(indexer) -> bool: if is_list_like(indexer) and not len(indexer): return True if not isinstance(indexer, tuple): indexer = (indexer,) return any(isinstance(idx, np.ndarray) and len(idx) == 0 for idx in indexer) # ----------------------------------------------------------- # Indexer Validation ``` ###Assistant : Check if we have an empty indexer. Parameters ---------- indexer : object Returns ------- bool
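To make the empty-indexer check in this record concrete, here is a minimal, self-contained sketch; `is_list_like` is approximated with a simple length check (the real pandas helper is stricter), so treat it as illustrative rather than the library implementation.

```Python
import numpy as np

def is_empty_indexer(indexer) -> bool:
    # Stand-in for pandas' is_list_like: anything sized that is not a string.
    if hasattr(indexer, "__len__") and not isinstance(indexer, str) and len(indexer) == 0:
        return True
    if not isinstance(indexer, tuple):
        indexer = (indexer,)
    return any(isinstance(idx, np.ndarray) and len(idx) == 0 for idx in indexer)

print(is_empty_indexer([]))                              # True: empty list-like
print(is_empty_indexer(np.array([], dtype=int)))         # True: empty ndarray
print(is_empty_indexer((np.array([0]), slice(None))))    # False: nothing empty
```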
801
def make_layoutgrids_gs(layoutgrids, gs): if gs in layoutgrids or gs.figure is None: return layoutgrids # in order to do constrained_layout there has to be at least *one* # gridspec in the tree: layoutgrids['hasgrids'] = True if not hasattr(gs, '_subplot_spec'): # normal gridspec parent = layoutgrids[gs.figure] layoutgrids[gs] = mlayoutgrid.LayoutGrid( parent=parent, parent_inner=True, name='gridspec', ncols=gs._ncols, nrows=gs._nrows, width_ratios=gs.get_width_ratios(), height_ratios=gs.get_height_ratios()) else: # this is a gridspecfromsubplotspec: subplot_spec = gs._subplot_spec parentgs = subplot_spec.get_gridspec() # if a nested gridspec it is possible the parent is not in there yet: if parentgs not in layoutgrids: layoutgrids = make_layoutgrids_gs(layoutgrids, parentgs) subspeclb = layoutgrids[parentgs] # get a unique representation: rep = object.__repr__(gs) + 'top' # gridspecfromsubplotspec need an outer container: if rep not in layoutgrids: layoutgrids[rep] = mlayoutgrid.LayoutGrid( parent=subspeclb, name='top', nrows=1, ncols=1, parent_pos=(subplot_spec.rowspan, subplot_spec.colspan)) layoutgrids[gs] = mlayoutgrid.LayoutGrid( parent=layoutgrids[rep], name='gridspec', nrows=gs._nrows, ncols=gs._ncols, width_ratios=gs.get_width_ratios(), height_ratios=gs.get_height_ratios()) return layoutgrids
Make the layoutgrid for a gridspec (and anything nested in the gridspec)
12
134
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def make_layoutgrids_gs(layoutgrids, gs): if gs in layoutgrids or gs.figure is None: return layoutgrids # in order to do constrained_layout there has to be at least *one* # gridspec in the tree: layoutgrids['hasgrids'] = True if not hasattr(gs, '_subplot_spec'): # normal gridspec parent = layoutgrids[gs.figure] layoutgrids[gs] = mlayoutgrid.LayoutGrid( parent=parent, parent_inner=True, name='gridspec', ncols=gs._ncols, nrows=gs._nrows, width_ratios=gs.get_width_ratios(), height_ratios=gs.get_height_ratios()) else: # this is a gridspecfromsubplotspec: subplot_spec = gs._subplot_spec parentgs = subplot_spec.get_gridspec() # if a nested gridspec it is possible the parent is not in there yet: if parentgs not in layoutgrids: layoutgrids = make_layoutgrids_gs(layoutgrids, parentgs) subspeclb = layoutgrids[parentgs] # get a unique representation: rep = object.__repr__(gs) + 'top' # gridspecfromsubplotspec need an outer container: if rep not in layoutgrids: layoutgrids[rep] = mlayoutgrid.LayoutGrid( parent=subspeclb, name='top', nrows=1, ncols=1, parent_pos=(subplot_spec.rowspan, subplot_spec.colspan)) layoutgrids[gs] = mlayoutgrid.LayoutGrid( parent=layoutgrids[rep], name='gridspec', nrows=gs._nrows, ncols=gs._ncols, width_ratios=gs.get_width_ratios(), height_ratios=gs.get_height_ratios()) return layoutgrids ``` ###Assistant : Make the layoutgrid for a gridspec (and anything nested in the gridspec)
802
def test_sitemap_published_titles(self): sitemap = CMSSitemap() locations = [] urlset = sitemap.get_urls() for item in urlset: locations.append(item['location']) for title in Title.objects.public(): page = title.page.get_public_object() if title.path: url = f'http://example.com/{title.language}/{title.path}/' else: url = f'http://example.com/{title.language}/{title.path}' if page.is_published('en') and not page.publisher_is_draft: self.assertTrue(url in locations) else: self.assertFalse(url in locations)
Check that published titles are in the urls
8
44
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_sitemap_published_titles(self): sitemap = CMSSitemap() locations = [] urlset = sitemap.get_urls() for item in urlset: locations.append(item['location']) for title in Title.objects.public(): page = title.page.get_public_object() if title.path: url = f'http://example.com/{title.language}/{title.path}/' else: url = f'http://example.com/{title.language}/{title.path}' if page.is_published('en') and not page.publisher_is_draft: self.assertTrue(url in locations) else: self.assertFalse(url in locations) ``` ###Assistant : Check that published titles are in the urls
803
def eye(N, chunks="auto", M=None, k=0, dtype=float): eye = {} if M is None: M = N if dtype is None: dtype = float if not isinstance(chunks, (int, str)): raise ValueError("chunks must be an int or string") vchunks, hchunks = normalize_chunks(chunks, shape=(N, M), dtype=dtype) chunks = vchunks[0] token = tokenize(N, chunks, M, k, dtype) name_eye = "eye-" + token for i, vchunk in enumerate(vchunks): for j, hchunk in enumerate(hchunks): if (j - i - 1) * chunks <= k <= (j - i + 1) * chunks: eye[name_eye, i, j] = ( np.eye, vchunk, hchunk, k - (j - i) * chunks, dtype, ) else: eye[name_eye, i, j] = (np.zeros, (vchunk, hchunk), dtype) return Array(eye, name_eye, shape=(N, M), chunks=(chunks, chunks), dtype=dtype) @derived_from(np)
Return a 2-D Array with ones on the diagonal and zeros elsewhere. Parameters ---------- N : int Number of rows in the output. chunks : int, str How to chunk the array. Must be one of the following forms: - A blocksize like 1000. - A size in bytes, like "100 MiB" which will choose a uniform block-like shape - The word "auto" which acts like the above, but uses a configuration value ``array.chunk-size`` for the chunk size M : int, optional Number of columns in the output. If None, defaults to `N`. k : int, optional Index of the diagonal: 0 (the default) refers to the main diagonal, a positive value refers to an upper diagonal, and a negative value to a lower diagonal. dtype : data-type, optional Data-type of the returned array. Returns ------- I : Array of shape (N,M) An array where all elements are equal to zero, except for the `k`-th diagonal, whose values are equal to one.
162
121
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def eye(N, chunks="auto", M=None, k=0, dtype=float): eye = {} if M is None: M = N if dtype is None: dtype = float if not isinstance(chunks, (int, str)): raise ValueError("chunks must be an int or string") vchunks, hchunks = normalize_chunks(chunks, shape=(N, M), dtype=dtype) chunks = vchunks[0] token = tokenize(N, chunks, M, k, dtype) name_eye = "eye-" + token for i, vchunk in enumerate(vchunks): for j, hchunk in enumerate(hchunks): if (j - i - 1) * chunks <= k <= (j - i + 1) * chunks: eye[name_eye, i, j] = ( np.eye, vchunk, hchunk, k - (j - i) * chunks, dtype, ) else: eye[name_eye, i, j] = (np.zeros, (vchunk, hchunk), dtype) return Array(eye, name_eye, shape=(N, M), chunks=(chunks, chunks), dtype=dtype) @derived_from(np) ``` ###Assistant : Return a 2-D Array with ones on the diagonal and zeros elsewhere. Parameters ---------- N : int Number of rows in the output. chunks : int, str How to chunk the array. Must be one of the following forms: - A blocksize like 1000. - A size in bytes, like "100 MiB" which will choose a uniform block-like shape - The word "auto" which acts like the above, but uses a configuration value ``array.chunk-size`` for the chunk size M : int, optional Number of columns in the output. If None, defaults to `N`. k : int, optional Index of the diagonal: 0 (the default) refers to the main diagonal, a positive value refers to an upper diagonal, and a negative value to a lower diagonal. dtype : data-type, optional Data-type of the returned array. Returns ------- I : Array of shape (N,M) An array where all elements are equal to zero, except for the `k`-th diagonal, whose values are equal to one.
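Since the docstring in this record describes dask.array's `eye`, a brief usage example may help; it assumes `dask` is installed, and the chunk layout shown follows from the blockwise construction in the code above.

```Python
import numpy as np
import dask.array as da

# 4x6 array with ones on the k=1 diagonal, built lazily in 2x2-ish blocks.
x = da.eye(4, chunks=2, M=6, k=1)
print(x.shape, x.chunks)   # (4, 6) ((2, 2), (2, 2, 2))
print(np.asarray(x))       # materializes the blocks into a dense NumPy array
```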
804
def is_composite_or_composite_value(tensor): # TODO(b/125094323): This should be isinstance(CompositeTensor) or # isinstance(CompositeTensorValue) once we support that. return isinstance( tensor, ( tf.__internal__.CompositeTensor, tf.compat.v1.SparseTensorValue, tf.compat.v1.ragged.RaggedTensorValue, ), )
Returns true if 'tensor' is a CompositeTensor or a CT Value object.
12
24
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def is_composite_or_composite_value(tensor): # TODO(b/125094323): This should be isinstance(CompositeTensor) or # isinstance(CompositeTensorValue) once we support that. return isinstance( tensor, ( tf.__internal__.CompositeTensor, tf.compat.v1.SparseTensorValue, tf.compat.v1.ragged.RaggedTensorValue, ), ) ``` ###Assistant : Returns true if 'tensor' is a CompositeTensor or a CT Value object.
805
def create_partition(tblname, start=None, end=None, partition_label=None, minutely=False): current_time = now() if not start: if minutely: start = current_time.replace(microsecond=0, second=0) else: start = current_time.replace(microsecond=0, second=0, minute=0) if not end: if minutely: end = start.replace(microsecond=0, second=0) + timedelta(minutes=1) else: end = start.replace(microsecond=0, second=0, minute=0) + timedelta(hours=1) start_timestamp = str(start) end_timestamp = str(end) if not partition_label: if minutely: partition_label = start.strftime('%Y%m%d_%H%M') else: partition_label = start.strftime('%Y%m%d_%H') try: with transaction.atomic(): with connection.cursor() as cursor: cursor.execute( f'CREATE TABLE IF NOT EXISTS {tblname}_{partition_label} ' f'PARTITION OF {tblname} ' f'FOR VALUES FROM (\'{start_timestamp}\') to (\'{end_timestamp}\');' ) except ProgrammingError as e: logger.debug(f'Caught known error due to existing partition: {e}')
Creates new partition table for events. - start defaults to beginning of current hour - end defaults to end of current hour - partition_label defaults to YYYYMMDD_HH - minutely will create partitions that span _a single minute_ for testing purposes
40
99
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def create_partition(tblname, start=None, end=None, partition_label=None, minutely=False): current_time = now() if not start: if minutely: start = current_time.replace(microsecond=0, second=0) else: start = current_time.replace(microsecond=0, second=0, minute=0) if not end: if minutely: end = start.replace(microsecond=0, second=0) + timedelta(minutes=1) else: end = start.replace(microsecond=0, second=0, minute=0) + timedelta(hours=1) start_timestamp = str(start) end_timestamp = str(end) if not partition_label: if minutely: partition_label = start.strftime('%Y%m%d_%H%M') else: partition_label = start.strftime('%Y%m%d_%H') try: with transaction.atomic(): with connection.cursor() as cursor: cursor.execute( f'CREATE TABLE IF NOT EXISTS {tblname}_{partition_label} ' f'PARTITION OF {tblname} ' f'FOR VALUES FROM (\'{start_timestamp}\') to (\'{end_timestamp}\');' ) except ProgrammingError as e: logger.debug(f'Caught known error due to existing partition: {e}') ``` ###Assistant : Creates new partition table for events. - start defaults to beginning of current hour - end defaults to end of current hour - partition_label defaults to YYYYMMDD_HH - minutely will create partitions that span _a single minute_ for testing purposes
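The boundary and label logic in the function above can be exercised without a database; the sketch below just reproduces the hourly defaults on a fixed timestamp.

```Python
from datetime import datetime, timedelta

current_time = datetime(2024, 5, 1, 13, 47, 25)           # stand-in for now()
start = current_time.replace(microsecond=0, second=0, minute=0)
end = start + timedelta(hours=1)
partition_label = start.strftime('%Y%m%d_%H')
print(start, end, partition_label)
# 2024-05-01 13:00:00 2024-05-01 14:00:00 20240501_13
```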
806
def rbf_kernel(X, Y=None, gamma=None): X, Y = check_pairwise_arrays(X, Y) if gamma is None: gamma = 1.0 / X.shape[1] K = euclidean_distances(X, Y, squared=True) K *= -gamma np.exp(K, K) # exponentiate K in-place return K
Compute the rbf (gaussian) kernel between X and Y. K(x, y) = exp(-gamma ||x-y||^2) for each pair of rows x in X and y in Y. Read more in the :ref:`User Guide <rbf_kernel>`. Parameters ---------- X : ndarray of shape (n_samples_X, n_features) A feature array. Y : ndarray of shape (n_samples_Y, n_features), default=None An optional second feature array. If `None`, uses `Y=X`. gamma : float, default=None If None, defaults to 1.0 / n_features. Returns ------- kernel_matrix : ndarray of shape (n_samples_X, n_samples_Y) The RBF kernel.
85
34
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def rbf_kernel(X, Y=None, gamma=None): X, Y = check_pairwise_arrays(X, Y) if gamma is None: gamma = 1.0 / X.shape[1] K = euclidean_distances(X, Y, squared=True) K *= -gamma np.exp(K, K) # exponentiate K in-place return K ``` ###Assistant : Compute the rbf (gaussian) kernel between X and Y. K(x, y) = exp(-gamma ||x-y||^2) for each pair of rows x in X and y in Y. Read more in the :ref:`User Guide <rbf_kernel>`. Parameters ---------- X : ndarray of shape (n_samples_X, n_features) A feature array. Y : ndarray of shape (n_samples_Y, n_features), default=None An optional second feature array. If `None`, uses `Y=X`. gamma : float, default=None If None, defaults to 1.0 / n_features. Returns ------- kernel_matrix : ndarray of shape (n_samples_X, n_samples_Y) The RBF kernel.
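Because the function documented above matches scikit-learn's public `sklearn.metrics.pairwise.rbf_kernel`, a short usage example follows; it assumes scikit-learn is installed.

```Python
import numpy as np
from sklearn.metrics.pairwise import rbf_kernel

X = np.array([[0.0, 0.0], [1.0, 1.0]])
K = rbf_kernel(X, gamma=0.5)   # K[i, j] = exp(-0.5 * ||x_i - x_j||^2)
print(K)
# [[1.         0.36787944]
#  [0.36787944 1.        ]]
```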
807
def _validate_argument_values(argument_spec, parameters, options_context=None, errors=None): if errors is None: errors = AnsibleValidationErrorMultiple() for param, spec in argument_spec.items(): choices = spec.get('choices') if choices is None: continue if isinstance(choices, (frozenset, KeysView, Sequence)) and not isinstance(choices, (binary_type, text_type)): if param in parameters: # Allow one or more when type='list' param with choices if isinstance(parameters[param], list): diff_list = [item for item in parameters[param] if item not in choices] if diff_list: choices_str = ", ".join([to_native(c) for c in choices]) diff_str = ", ".join(diff_list) msg = "value of %s must be one or more of: %s. Got no match for: %s" % (param, choices_str, diff_str) if options_context: msg = "{0} found in {1}".format(msg, " -> ".join(options_context)) errors.append(ArgumentValueError(msg)) elif parameters[param] not in choices: # PyYaml converts certain strings to bools. If we can unambiguously convert back, do so before checking # the value. If we can't figure this out, module author is responsible. if parameters[param] == 'False': overlap = BOOLEANS_FALSE.intersection(choices) if len(overlap) == 1: # Extract from a set (parameters[param],) = overlap if parameters[param] == 'True': overlap = BOOLEANS_TRUE.intersection(choices) if len(overlap) == 1: (parameters[param],) = overlap if parameters[param] not in choices: choices_str = ", ".join([to_native(c) for c in choices]) msg = "value of %s must be one of: %s, got: %s" % (param, choices_str, parameters[param]) if options_context: msg = "{0} found in {1}".format(msg, " -> ".join(options_context)) errors.append(ArgumentValueError(msg)) else: msg = "internal error: choices for argument %s are not iterable: %s" % (param, choices) if options_context: msg = "{0} found in {1}".format(msg, " -> ".join(options_context)) errors.append(ArgumentTypeError(msg))
Ensure all arguments have the requested values, and there are no stray arguments
13
248
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _validate_argument_values(argument_spec, parameters, options_context=None, errors=None): if errors is None: errors = AnsibleValidationErrorMultiple() for param, spec in argument_spec.items(): choices = spec.get('choices') if choices is None: continue if isinstance(choices, (frozenset, KeysView, Sequence)) and not isinstance(choices, (binary_type, text_type)): if param in parameters: # Allow one or more when type='list' param with choices if isinstance(parameters[param], list): diff_list = [item for item in parameters[param] if item not in choices] if diff_list: choices_str = ", ".join([to_native(c) for c in choices]) diff_str = ", ".join(diff_list) msg = "value of %s must be one or more of: %s. Got no match for: %s" % (param, choices_str, diff_str) if options_context: msg = "{0} found in {1}".format(msg, " -> ".join(options_context)) errors.append(ArgumentValueError(msg)) elif parameters[param] not in choices: # PyYaml converts certain strings to bools. If we can unambiguously convert back, do so before checking # the value. If we can't figure this out, module author is responsible. if parameters[param] == 'False': overlap = BOOLEANS_FALSE.intersection(choices) if len(overlap) == 1: # Extract from a set (parameters[param],) = overlap if parameters[param] == 'True': overlap = BOOLEANS_TRUE.intersection(choices) if len(overlap) == 1: (parameters[param],) = overlap if parameters[param] not in choices: choices_str = ", ".join([to_native(c) for c in choices]) msg = "value of %s must be one of: %s, got: %s" % (param, choices_str, parameters[param]) if options_context: msg = "{0} found in {1}".format(msg, " -> ".join(options_context)) errors.append(ArgumentValueError(msg)) else: msg = "internal error: choices for argument %s are not iterable: %s" % (param, choices) if options_context: msg = "{0} found in {1}".format(msg, " -> ".join(options_context)) errors.append(ArgumentTypeError(msg)) ``` ###Assistant : Ensure all arguments have the requested values, and there are no stray arguments
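One subtle branch in the code above re-maps the strings 'True'/'False' (produced by YAML's boolean coercion) back onto a module's declared choices. The isolated sketch below shows that step with an illustrative `BOOLEANS_FALSE` set rather than Ansible's real constant.

```Python
BOOLEANS_FALSE = frozenset(('no', 'off', '0', 'false', False))  # illustrative only

value, choices = 'False', ['true', 'false']
if value == 'False':
    overlap = BOOLEANS_FALSE.intersection(choices)
    if len(overlap) == 1:
        (value,) = overlap        # unambiguous: map back to the declared choice
print(value, value in choices)    # false True
```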
808
def content(self): if self._content is None: self._load() return self._content
The content of the artifact (representation varies)
7
9
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def content(self): if self._content is None: self._load() return self._content ``` ###Assistant : The content of the artifact (representation varies)
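The property above is a standard lazy-load accessor; a generic, self-contained version of the same pattern (class and loader are hypothetical) looks like this.

```Python
class Artifact:
    def __init__(self, path):
        self._path = path
        self._content = None

    def _load(self):
        # Read the backing file only once, on first access.
        with open(self._path, "rb") as fh:
            self._content = fh.read()

    @property
    def content(self):
        if self._content is None:
            self._load()
        return self._content
```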
809
def get_product_filter_data(query_args=None): if isinstance(query_args, str): query_args = json.loads(query_args) query_args = frappe._dict(query_args) if query_args: search = query_args.get("search") field_filters = query_args.get("field_filters", {}) attribute_filters = query_args.get("attribute_filters", {}) start = cint(query_args.start) if query_args.get("start") else 0 item_group = query_args.get("item_group") from_filters = query_args.get("from_filters") else: search, attribute_filters, item_group, from_filters = None, None, None, None field_filters = {} start = 0 # if new filter is checked, reset start to show filtered items from page 1 if from_filters: start = 0 sub_categories = [] if item_group: sub_categories = get_child_groups_for_website(item_group, immediate=True) engine = ProductQuery() try: result = engine.query( attribute_filters, field_filters, search_term=search, start=start, item_group=item_group ) except Exception: traceback = frappe.get_traceback() frappe.log_error(traceback, frappe._("Product Engine Error")) return {"exc": "Something went wrong!"} # discount filter data filters = {} discounts = result["discounts"] if discounts: filter_engine = ProductFiltersBuilder() filters["discount_filters"] = filter_engine.get_discount_filters(discounts) return { "items": result["items"] or [], "filters": filters, "settings": engine.settings, "sub_categories": sub_categories, "items_count": result["items_count"] } @frappe.whitelist(allow_guest=True)
Returns filtered products and discount filters. :param query_args (dict): contains filters to get products list Query Args filters: search (str): Search Term. field_filters (dict): Keys include item_group, brand, etc. attribute_filters(dict): Keys include Color, Size, etc. start (int): Offset items by item_group (str): Valid Item Group from_filters (bool): Set as True to jump to page 1
55
143
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_product_filter_data(query_args=None): if isinstance(query_args, str): query_args = json.loads(query_args) query_args = frappe._dict(query_args) if query_args: search = query_args.get("search") field_filters = query_args.get("field_filters", {}) attribute_filters = query_args.get("attribute_filters", {}) start = cint(query_args.start) if query_args.get("start") else 0 item_group = query_args.get("item_group") from_filters = query_args.get("from_filters") else: search, attribute_filters, item_group, from_filters = None, None, None, None field_filters = {} start = 0 # if new filter is checked, reset start to show filtered items from page 1 if from_filters: start = 0 sub_categories = [] if item_group: sub_categories = get_child_groups_for_website(item_group, immediate=True) engine = ProductQuery() try: result = engine.query( attribute_filters, field_filters, search_term=search, start=start, item_group=item_group ) except Exception: traceback = frappe.get_traceback() frappe.log_error(traceback, frappe._("Product Engine Error")) return {"exc": "Something went wrong!"} # discount filter data filters = {} discounts = result["discounts"] if discounts: filter_engine = ProductFiltersBuilder() filters["discount_filters"] = filter_engine.get_discount_filters(discounts) return { "items": result["items"] or [], "filters": filters, "settings": engine.settings, "sub_categories": sub_categories, "items_count": result["items_count"] } @frappe.whitelist(allow_guest=True) ``` ###Assistant : Returns filtered products and discount filters. :param query_args (dict): contains filters to get products list Query Args filters: search (str): Search Term. field_filters (dict): Keys include item_group, brand, etc. attribute_filters(dict): Keys include Color, Size, etc. start (int): Offset items by item_group (str): Valid Item Group from_filters (bool): Set as True to jump to page 1
810
async def async_turn_on(self, **kwargs): # noqa: C901 should_update = False on_command_type = self._config[CONF_ON_COMMAND_TYPE]
Turn the device on. This method is a coroutine.
9
13
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def async_turn_on(self, **kwargs): # noqa: C901 should_update = False on_command_type = self._config[CONF_ON_COMMAND_TYPE] ``` ###Assistant : Turn the device on. This method is a coroutine.
811
def test_fake_mac(self, modifiers, expected): seq = keyutils.KeySequence() info = keyutils.KeyInfo(key=Qt.Key.Key_A, modifiers=modifiers) new = seq.append_event(info.to_event()) assert new[0] == keyutils.KeyInfo(Qt.Key.Key_A, expected)
Make sure Control/Meta are swapped with a simulated Mac.
9
19
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_fake_mac(self, modifiers, expected): seq = keyutils.KeySequence() info = keyutils.KeyInfo(key=Qt.Key.Key_A, modifiers=modifiers) new = seq.append_event(info.to_event()) assert new[0] == keyutils.KeyInfo(Qt.Key.Key_A, expected) ``` ###Assistant : Make sure Control/Meta are swapped with a simulated Mac.
812
def default_params(self) -> dict: return {"order": "asc", "sort": self.sort_key, "limit": self.limit}
Returns the parameters to be sent together with the API call to Recurly
13
11
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def default_params(self) -> dict: return {"order": "asc", "sort": self.sort_key, "limit": self.limit} ``` ###Assistant : Returns the parameters to be sent together with the API call to Recurly
813
def _should_start_new_health_check(self) -> bool: if self._health_check_ref is not None: # There's already an active health check. return False # If there's no active health check, kick off another and reset # the timer if it's been long enough since the last health # check. Add some randomness to avoid synchronizing across all # replicas. time_since_last = time.time() - self._last_health_check_time randomized_period = self._health_check_period_s * random.uniform(0.9, 1.1) return time_since_last > randomized_period
Determines if a new health check should be kicked off. A health check will be started if: 1) There is not already an active health check. 2) It has been more than self._health_check_period_s since the previous health check was *started*. This assumes that self._health_check_ref is reset to `None` when an active health check succeeds or fails (due to returning or timeout).
61
69
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _should_start_new_health_check(self) -> bool: if self._health_check_ref is not None: # There's already an active health check. return False # If there's no active health check, kick off another and reset # the timer if it's been long enough since the last health # check. Add some randomness to avoid synchronizing across all # replicas. time_since_last = time.time() - self._last_health_check_time randomized_period = self._health_check_period_s * random.uniform(0.9, 1.1) return time_since_last > randomized_period ``` ###Assistant : Determines if a new health check should be kicked off. A health check will be started if: 1) There is not already an active health check. 2) It has been more than self._health_check_period_s since the previous health check was *started*. This assumes that self._health_check_ref is reset to `None` when an active health check succeeds or fails (due to returning or timeout).
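The timing rule described above can be restated as a standalone helper; the period constant and argument names here are placeholders for the replica-state attributes used in the original.

```Python
import random
import time

HEALTH_CHECK_PERIOD_S = 10.0   # placeholder for self._health_check_period_s

def should_start_new_health_check(last_health_check_time, health_check_ref=None):
    if health_check_ref is not None:
        return False              # a check is already in flight
    time_since_last = time.time() - last_health_check_time
    randomized_period = HEALTH_CHECK_PERIOD_S * random.uniform(0.9, 1.1)
    return time_since_last > randomized_period

print(should_start_new_health_check(time.time() - 30))   # True: well past the period
print(should_start_new_health_check(time.time()))        # False: just checked
```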
814
def numpy_pad_and_concatenate(array1, array2, padding_index=-100): array1 = atleast_1d(array1) array2 = atleast_1d(array2) if len(array1.shape) == 1 or array1.shape[1] == array2.shape[1]: return np.concatenate((array1, array2), axis=0) # Let's figure out the new shape new_shape = (array1.shape[0] + array2.shape[0], max(array1.shape[1], array2.shape[1])) + array1.shape[2:] # Now let's fill the result tensor result = np.full_like(array1, padding_index, shape=new_shape) result[: array1.shape[0], : array1.shape[1]] = array1 result[array1.shape[0] :, : array2.shape[1]] = array2 return result
Concatenates `array1` and `array2` on first axis, applying padding on the second if necessary.
14
64
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def numpy_pad_and_concatenate(array1, array2, padding_index=-100): array1 = atleast_1d(array1) array2 = atleast_1d(array2) if len(array1.shape) == 1 or array1.shape[1] == array2.shape[1]: return np.concatenate((array1, array2), axis=0) # Let's figure out the new shape new_shape = (array1.shape[0] + array2.shape[0], max(array1.shape[1], array2.shape[1])) + array1.shape[2:] # Now let's fill the result tensor result = np.full_like(array1, padding_index, shape=new_shape) result[: array1.shape[0], : array1.shape[1]] = array1 result[array1.shape[0] :, : array2.shape[1]] = array2 return result ``` ###Assistant : Concatenates `array1` and `array2` on first axis, applying padding on the second if necessary.
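The helper above is self-contained apart from `atleast_1d`; swapping in `np.atleast_1d` gives a runnable version, shown here with a small shape-mismatch example.

```Python
import numpy as np

def numpy_pad_and_concatenate(array1, array2, padding_index=-100):
    array1, array2 = np.atleast_1d(array1), np.atleast_1d(array2)
    if len(array1.shape) == 1 or array1.shape[1] == array2.shape[1]:
        return np.concatenate((array1, array2), axis=0)
    new_shape = (array1.shape[0] + array2.shape[0],
                 max(array1.shape[1], array2.shape[1])) + array1.shape[2:]
    result = np.full_like(array1, padding_index, shape=new_shape)
    result[: array1.shape[0], : array1.shape[1]] = array1
    result[array1.shape[0]:, : array2.shape[1]] = array2
    return result

a = np.ones((2, 3), dtype=int)
b = 2 * np.ones((1, 5), dtype=int)
print(numpy_pad_and_concatenate(a, b))
# [[   1    1    1 -100 -100]
#  [   1    1    1 -100 -100]
#  [   2    2    2    2    2]]
```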
815
def _ray(self) -> "ray": global ray if ray is None: try: import ray except ImportError as exc: raise RuntimeError( "Using the `RayTaskRunner` requires `ray` to be installed." ) from exc return ray
Delayed import of `ray` allowing configuration of the task runner without the extra installed and improves `prefect` import times.
19
32
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _ray(self) -> "ray": global ray if ray is None: try: import ray except ImportError as exc: raise RuntimeError( "Using the `RayTaskRunner` requires `ray` to be installed." ) from exc return ray ``` ###Assistant : Delayed import of `ray` allowing configuration of the task runner without the extra installed and improves `prefect` import times.
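The same delayed-import pattern can be written generically; the function name and error message below are illustrative, not part of the prefect API.

```Python
import importlib

def load_optional_dependency(name):
    # Import an optional package only when it is actually needed.
    try:
        return importlib.import_module(name)
    except ImportError as exc:
        raise RuntimeError(
            f"This feature requires `{name}` to be installed."
        ) from exc

json_module = load_optional_dependency("json")   # stdlib module, always available
```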
816
def temperature_unit(self) -> str: if ( self._unit_value and self._unit_value.metadata.unit and "f" in self._unit_value.metadata.unit.lower() ): return UnitOfTemperature.FAHRENHEIT return UnitOfTemperature.CELSIUS
Return the unit of measurement used by the platform.
9
18
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def temperature_unit(self) -> str: if ( self._unit_value and self._unit_value.metadata.unit and "f" in self._unit_value.metadata.unit.lower() ): return UnitOfTemperature.FAHRENHEIT return UnitOfTemperature.CELSIUS ``` ###Assistant : Return the unit of measurement used by the platform.
817
def get_point_of_rotated_vertical(origin, line_length, degrees): rad = np.deg2rad(-degrees) return [origin[0] + line_length * np.sin(rad), origin[1] + line_length * np.cos(rad)] fig, ax = plt.subplots(figsize=(8, 7)) ax.set(xlim=(0, 6), ylim=(-1, 4)) ax.set_title("Orientation of the bracket arrows relative to angleA and angleB") for i, style in enumerate(["]-[", "|-|"]): for j, angle in enumerate([-40, 60]): y = 2*i + j arrow_centers = ((1, y), (5, y)) vlines = ((1, y + 0.5), (5, y + 0.5)) anglesAB = (angle, -angle) bracketstyle = f"{style}, angleA={anglesAB[0]}, angleB={anglesAB[1]}" bracket = FancyArrowPatch(*arrow_centers, arrowstyle=bracketstyle, mutation_scale=42) ax.add_patch(bracket) ax.text(3, y + 0.05, bracketstyle, ha="center", va="bottom") ax.vlines([i[0] for i in vlines], [y, y], [i[1] for i in vlines], linestyles="--", color="C0") # Get the top coordinates for the drawn patches at A and B patch_tops = [get_point_of_rotated_vertical(center, 0.5, angle) for center, angle in zip(arrow_centers, anglesAB)] # Define the connection directions for the annotation arrows connection_dirs = (1, -1) if angle > 0 else (-1, 1) # Add arrows and annotation text arrowstyle = "Simple, tail_width=0.5, head_width=4, head_length=8" for vline, dir, patch_top, angle in zip(vlines, connection_dirs, patch_tops, anglesAB): kw = dict(connectionstyle=f"arc3,rad={dir * 0.5}", arrowstyle=arrowstyle, color="C0") ax.add_patch(FancyArrowPatch(vline, patch_top, **kw)) ax.text(vline[0] - dir * 0.15, y + 0.3, f'{angle}°', ha="center", va="center") ############################################################################# # # .. admonition:: References # # The use of the following functions, methods, classes and modules is shown # in this example: # # - `matplotlib.patches.ArrowStyle`
Return xy coordinates of the vertical line end rotated by degrees.
11
221
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_point_of_rotated_vertical(origin, line_length, degrees): rad = np.deg2rad(-degrees) return [origin[0] + line_length * np.sin(rad), origin[1] + line_length * np.cos(rad)] fig, ax = plt.subplots(figsize=(8, 7)) ax.set(xlim=(0, 6), ylim=(-1, 4)) ax.set_title("Orientation of the bracket arrows relative to angleA and angleB") for i, style in enumerate(["]-[", "|-|"]): for j, angle in enumerate([-40, 60]): y = 2*i + j arrow_centers = ((1, y), (5, y)) vlines = ((1, y + 0.5), (5, y + 0.5)) anglesAB = (angle, -angle) bracketstyle = f"{style}, angleA={anglesAB[0]}, angleB={anglesAB[1]}" bracket = FancyArrowPatch(*arrow_centers, arrowstyle=bracketstyle, mutation_scale=42) ax.add_patch(bracket) ax.text(3, y + 0.05, bracketstyle, ha="center", va="bottom") ax.vlines([i[0] for i in vlines], [y, y], [i[1] for i in vlines], linestyles="--", color="C0") # Get the top coordinates for the drawn patches at A and B patch_tops = [get_point_of_rotated_vertical(center, 0.5, angle) for center, angle in zip(arrow_centers, anglesAB)] # Define the connection directions for the annotation arrows connection_dirs = (1, -1) if angle > 0 else (-1, 1) # Add arrows and annotation text arrowstyle = "Simple, tail_width=0.5, head_width=4, head_length=8" for vline, dir, patch_top, angle in zip(vlines, connection_dirs, patch_tops, anglesAB): kw = dict(connectionstyle=f"arc3,rad={dir * 0.5}", arrowstyle=arrowstyle, color="C0") ax.add_patch(FancyArrowPatch(vline, patch_top, **kw)) ax.text(vline[0] - dir * 0.15, y + 0.3, f'{angle}°', ha="center", va="center") ############################################################################# # # .. admonition:: References # # The use of the following functions, methods, classes and modules is shown # in this example: # # - `matplotlib.patches.ArrowStyle` ``` ###Assistant : Return xy coordinates of the vertical line end rotated by degrees.
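The rotation helper at the top of the example above is easy to sanity-check numerically: 0 degrees leaves the point directly above the origin, and positive angles rotate it counter-clockwise.

```Python
import numpy as np

def get_point_of_rotated_vertical(origin, line_length, degrees):
    rad = np.deg2rad(-degrees)
    return [origin[0] + line_length * np.sin(rad),
            origin[1] + line_length * np.cos(rad)]

print(get_point_of_rotated_vertical((0, 0), 1.0, 0))    # [0.0, 1.0]   straight up
print(get_point_of_rotated_vertical((0, 0), 1.0, 90))   # [-1.0, ~0.0] rotated to the left
```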
818
def processEntityInAttribute(self, allowedChar): self.consumeEntity(allowedChar=allowedChar, fromAttribute=True)
This method replaces the need for "entityInAttributeValueState".
7
5
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def processEntityInAttribute(self, allowedChar): self.consumeEntity(allowedChar=allowedChar, fromAttribute=True) ``` ###Assistant : This method replaces the need for "entityInAttributeValueState".
819
def _get_ordered_dummies(mul, verbose=False): # setup dicts to avoid repeated calculations in key() args = Mul.make_args(mul) fac_dum = { fac: fac.atoms(Dummy) for fac in args } fac_repr = { fac: __kprint(fac) for fac in args } all_dums = set().union(*fac_dum.values()) mask = {} for d in all_dums: if d.assumptions0.get('below_fermi'): mask[d] = '0' elif d.assumptions0.get('above_fermi'): mask[d] = '1' else: mask[d] = '2' dum_repr = {d: __kprint(d) for d in all_dums}
Returns all dummies in the mul sorted in canonical order. Explanation =========== The purpose of the canonical ordering is that dummies can be substituted consistently across terms with the result that equivalent terms can be simplified. It is not possible to determine if two terms are equivalent based solely on the dummy order. However, a consistent substitution guided by the ordered dummies should lead to trivially (non-)equivalent terms, thereby revealing the equivalence. This also means that if two terms have identical sequences of dummies, the (non-)equivalence should already be apparent. Strategy -------- The canonical order is given by an arbitrary sorting rule. A sort key is determined for each dummy as a tuple that depends on all factors where the index is present. The dummies are thereby sorted according to the contraction structure of the term, instead of sorting based solely on the dummy symbol itself. After all dummies in the term has been assigned a key, we check for identical keys, i.e. unorderable dummies. If any are found, we call a specialized method, _determine_ambiguous(), that will determine a unique order based on recursive calls to _get_ordered_dummies(). Key description --------------- A high level description of the sort key: 1. Range of the dummy index 2. Relation to external (non-dummy) indices 3. Position of the index in the first factor 4. Position of the index in the second factor The sort key is a tuple with the following components: 1. A single character indicating the range of the dummy (above, below or general.) 2. A list of strings with fully masked string representations of all factors where the dummy is present. By masked, we mean that dummies are represented by a symbol to indicate either below fermi, above or general. No other information is displayed about the dummies at this point. The list is sorted stringwise. 3. An integer number indicating the position of the index, in the first factor as sorted in 2. 4. An integer number indicating the position of the index, in the second factor as sorted in 2. If a factor is either of type AntiSymmetricTensor or SqOperator, the index position in items 3 and 4 is indicated as 'upper' or 'lower' only. (Creation operators are considered upper and annihilation operators lower.) If the masked factors are identical, the two factors cannot be ordered unambiguously in item 2. In this case, items 3, 4 are left out. If several indices are contracted between the unorderable factors, it will be handled by _determine_ambiguous()
415
67
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_ordered_dummies(mul, verbose=False): # setup dicts to avoid repeated calculations in key() args = Mul.make_args(mul) fac_dum = { fac: fac.atoms(Dummy) for fac in args } fac_repr = { fac: __kprint(fac) for fac in args } all_dums = set().union(*fac_dum.values()) mask = {} for d in all_dums: if d.assumptions0.get('below_fermi'): mask[d] = '0' elif d.assumptions0.get('above_fermi'): mask[d] = '1' else: mask[d] = '2' dum_repr = {d: __kprint(d) for d in all_dums} ``` ###Assistant : Returns all dummies in the mul sorted in canonical order. Explanation =========== The purpose of the canonical ordering is that dummies can be substituted consistently across terms with the result that equivalent terms can be simplified. It is not possible to determine if two terms are equivalent based solely on the dummy order. However, a consistent substitution guided by the ordered dummies should lead to trivially (non-)equivalent terms, thereby revealing the equivalence. This also means that if two terms have identical sequences of dummies, the (non-)equivalence should already be apparent. Strategy -------- The canonical order is given by an arbitrary sorting rule. A sort key is determined for each dummy as a tuple that depends on all factors where the index is present. The dummies are thereby sorted according to the contraction structure of the term, instead of sorting based solely on the dummy symbol itself. After all dummies in the term has been assigned a key, we check for identical keys, i.e. unorderable dummies. If any are found, we call a specialized method, _determine_ambiguous(), that will determine a unique order based on recursive calls to _get_ordered_dummies(). Key description --------------- A high level description of the sort key: 1. Range of the dummy index 2. Relation to external (non-dummy) indices 3. Position of the index in the first factor 4. Position of the index in the second factor The sort key is a tuple with the following components: 1. A single character indicating the range of the dummy (above, below or general.) 2. A list of strings with fully masked string representations of all factors where the dummy is present. By masked, we mean that dummies are represented by a symbol to indicate either below fermi, above or general. No other information is displayed about the dummies at this point. The list is sorted stringwise. 3. An integer number indicating the position of the index, in the first factor as sorted in 2. 4. An integer number indicating the position of the index, in the second factor as sorted in 2. If a factor is either of type AntiSymmetricTensor or SqOperator, the index position in items 3 and 4 is indicated as 'upper' or 'lower' only. (Creation operators are considered upper and annihilation operators lower.) If the masked factors are identical, the two factors cannot be ordered unambiguously in item 2. In this case, items 3, 4 are left out. If several indices are contracted between the unorderable factors, it will be handled by _determine_ambiguous()
820
def read_results_from_s3(query_execution_id): s3_hook = S3Hook() file_obj = s3_hook.get_conn().get_object(Bucket=S3_BUCKET, Key=f'{S3_KEY}/{query_execution_id}.csv') file_content = file_obj['Body'].read().decode('utf-8') print(file_content) QUERY_CREATE_TABLE = f QUERY_READ_TABLE = f QUERY_DROP_TABLE = f with DAG( dag_id='example_athena', schedule_interval=None, start_date=datetime(2021, 1, 1), tags=['example'], catchup=False, ) as dag: upload_sample_data = S3CreateObjectOperator( task_id='upload_sample_data', s3_bucket=S3_BUCKET, s3_key=f'{S3_KEY}/{ATHENA_TABLE}/{SAMPLE_FILENAME}', data=SAMPLE_DATA, replace=True, ) create_table = AthenaOperator( task_id='create_table', query=QUERY_CREATE_TABLE, database=ATHENA_DATABASE, output_location=f's3://{S3_BUCKET}/{S3_KEY}', ) # [START howto_athena_operator] read_table = AthenaOperator( task_id='read_table', query=QUERY_READ_TABLE, database=ATHENA_DATABASE, output_location=f's3://{S3_BUCKET}/{S3_KEY}', ) # [END howto_athena_operator] # [START howto_athena_sensor] await_query = AthenaSensor( task_id='await_query', query_execution_id=read_table.output, ) # [END howto_athena_sensor] drop_table = AthenaOperator( task_id='drop_table', query=QUERY_DROP_TABLE, database=ATHENA_DATABASE, output_location=f's3://{S3_BUCKET}/{S3_KEY}', ) remove_s3_files = S3DeleteObjectsOperator( task_id='remove_s3_files', bucket=S3_BUCKET, prefix=S3_KEY, ) ( upload_sample_data >> create_table >> read_table >> await_query >> read_results_from_s3(read_table.output) >> drop_table >> remove_s3_files )
CREATE EXTERNAL TABLE IF NOT EXISTS {ATHENA_DATABASE}.{ATHENA_TABLE} ( `name` string, `age` int ) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' WITH SERDEPROPERTIES ( 'serialization.format' = ',', 'field.delim' = ',' ) LOCATION 's3://{S3_BUCKET}/{S3_KEY}/{ATHENA_TABLE}' TBLPROPERTIES ('has_encrypted_data'='false') SELECT * from {ATHENA_DATABASE}.{ATHENA_TABLE} DROP TABLE IF EXISTS {ATHENA_DATABASE}.{ATHENA_TABLE}
40
107
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def read_results_from_s3(query_execution_id): s3_hook = S3Hook() file_obj = s3_hook.get_conn().get_object(Bucket=S3_BUCKET, Key=f'{S3_KEY}/{query_execution_id}.csv') file_content = file_obj['Body'].read().decode('utf-8') print(file_content) QUERY_CREATE_TABLE = f QUERY_READ_TABLE = f QUERY_DROP_TABLE = f with DAG( dag_id='example_athena', schedule_interval=None, start_date=datetime(2021, 1, 1), tags=['example'], catchup=False, ) as dag: upload_sample_data = S3CreateObjectOperator( task_id='upload_sample_data', s3_bucket=S3_BUCKET, s3_key=f'{S3_KEY}/{ATHENA_TABLE}/{SAMPLE_FILENAME}', data=SAMPLE_DATA, replace=True, ) create_table = AthenaOperator( task_id='create_table', query=QUERY_CREATE_TABLE, database=ATHENA_DATABASE, output_location=f's3://{S3_BUCKET}/{S3_KEY}', ) # [START howto_athena_operator] read_table = AthenaOperator( task_id='read_table', query=QUERY_READ_TABLE, database=ATHENA_DATABASE, output_location=f's3://{S3_BUCKET}/{S3_KEY}', ) # [END howto_athena_operator] # [START howto_athena_sensor] await_query = AthenaSensor( task_id='await_query', query_execution_id=read_table.output, ) # [END howto_athena_sensor] drop_table = AthenaOperator( task_id='drop_table', query=QUERY_DROP_TABLE, database=ATHENA_DATABASE, output_location=f's3://{S3_BUCKET}/{S3_KEY}', ) remove_s3_files = S3DeleteObjectsOperator( task_id='remove_s3_files', bucket=S3_BUCKET, prefix=S3_KEY, ) ( upload_sample_data >> create_table >> read_table >> await_query >> read_results_from_s3(read_table.output) >> drop_table >> remove_s3_files ) ``` ###Assistant : CREATE EXTERNAL TABLE IF NOT EXISTS {ATHENA_DATABASE}.{ATHENA_TABLE} ( `name` string, `age` int ) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' WITH SERDEPROPERTIES ( 'serialization.format' = ',', 'field.delim' = ',' ) LOCATION 's3://{S3_BUCKET}/{S3_KEY}/{ATHENA_TABLE}' TBLPROPERTIES ('has_encrypted_data'='false') SELECT * from {ATHENA_DATABASE}.{ATHENA_TABLE} DROP TABLE IF EXISTS {ATHENA_DATABASE}.{ATHENA_TABLE}
821
def is_datetime64_ns_dtype(arr_or_dtype) -> bool: if arr_or_dtype is None: return False try: tipo = get_dtype(arr_or_dtype) except TypeError: if is_datetime64tz_dtype(arr_or_dtype): tipo = get_dtype(arr_or_dtype.dtype) else: return False return tipo == DT64NS_DTYPE or ( isinstance(tipo, DatetimeTZDtype) and tipo._unit == "ns" )
Check whether the provided array or dtype is of the datetime64[ns] dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- bool Whether or not the array or dtype is of the datetime64[ns] dtype. Examples -------- >>> is_datetime64_ns_dtype(str) False >>> is_datetime64_ns_dtype(int) False >>> is_datetime64_ns_dtype(np.datetime64) # no unit False >>> is_datetime64_ns_dtype(DatetimeTZDtype("ns", "US/Eastern")) True >>> is_datetime64_ns_dtype(np.array(['a', 'b'])) False >>> is_datetime64_ns_dtype(np.array([1, 2])) False >>> is_datetime64_ns_dtype(np.array([], dtype="datetime64")) # no unit False >>> is_datetime64_ns_dtype(np.array([], dtype="datetime64[ps]")) # wrong unit False >>> is_datetime64_ns_dtype(pd.DatetimeIndex([1, 2, 3], dtype="datetime64[ns]")) True
86
37
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def is_datetime64_ns_dtype(arr_or_dtype) -> bool: if arr_or_dtype is None: return False try: tipo = get_dtype(arr_or_dtype) except TypeError: if is_datetime64tz_dtype(arr_or_dtype): tipo = get_dtype(arr_or_dtype.dtype) else: return False return tipo == DT64NS_DTYPE or ( isinstance(tipo, DatetimeTZDtype) and tipo._unit == "ns" ) ``` ###Assistant : Check whether the provided array or dtype is of the datetime64[ns] dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- bool Whether or not the array or dtype is of the datetime64[ns] dtype. Examples -------- >>> is_datetime64_ns_dtype(str) False >>> is_datetime64_ns_dtype(int) False >>> is_datetime64_ns_dtype(np.datetime64) # no unit False >>> is_datetime64_ns_dtype(DatetimeTZDtype("ns", "US/Eastern")) True >>> is_datetime64_ns_dtype(np.array(['a', 'b'])) False >>> is_datetime64_ns_dtype(np.array([1, 2])) False >>> is_datetime64_ns_dtype(np.array([], dtype="datetime64")) # no unit False >>> is_datetime64_ns_dtype(np.array([], dtype="datetime64[ps]")) # wrong unit False >>> is_datetime64_ns_dtype(pd.DatetimeIndex([1, 2, 3], dtype="datetime64[ns]")) True
822
def regroup(parser, token): bits = token.split_contents() if len(bits) != 6: raise TemplateSyntaxError("'regroup' tag takes five arguments") target = parser.compile_filter(bits[1]) if bits[2] != 'by': raise TemplateSyntaxError("second argument to 'regroup' tag must be 'by'") if bits[4] != 'as': raise TemplateSyntaxError( "next-to-last argument to 'regroup' tag must be 'as'" ) var_name = bits[5] # RegroupNode will take each item in 'target', put it in the context under # 'var_name', evaluate 'var_name'.'expression' in the current context, and # group by the resulting value. After all items are processed, it will # save the final result in the context under 'var_name', thus clearing the # temporary values. This hack is necessary because the template engine # doesn't provide a context-aware equivalent of Python's getattr. expression = parser.compile_filter(var_name + VARIABLE_ATTRIBUTE_SEPARATOR + bits[3]) return RegroupNode(target, expression, var_name) @register.tag
Regroup a list of alike objects by a common attribute. This complex tag is best illustrated by use of an example: say that ``musicians`` is a list of ``Musician`` objects that have ``name`` and ``instrument`` attributes, and you'd like to display a list that looks like: * Guitar: * Django Reinhardt * Emily Remler * Piano: * Lovie Austin * Bud Powell * Trumpet: * Duke Ellington The following snippet of template code would accomplish this dubious task:: {% regroup musicians by instrument as grouped %} <ul> {% for group in grouped %} <li>{{ group.grouper }} <ul> {% for musician in group.list %} <li>{{ musician.name }}</li> {% endfor %} </ul> {% endfor %} </ul> As you can see, ``{% regroup %}`` populates a variable with a list of objects with ``grouper`` and ``list`` attributes. ``grouper`` contains the item that was grouped by; ``list`` contains the list of objects that share that ``grouper``. In this case, ``grouper`` would be ``Guitar``, ``Piano`` and ``Trumpet``, and ``list`` is the list of musicians who play this instrument. Note that ``{% regroup %}`` does not work when the list to be grouped is not sorted by the key you are grouping by! This means that if your list of musicians was not sorted by instrument, you'd need to make sure it is sorted before using it, i.e.:: {% regroup musicians|dictsort:"instrument" by instrument as grouped %}
230
131
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def regroup(parser, token): bits = token.split_contents() if len(bits) != 6: raise TemplateSyntaxError("'regroup' tag takes five arguments") target = parser.compile_filter(bits[1]) if bits[2] != 'by': raise TemplateSyntaxError("second argument to 'regroup' tag must be 'by'") if bits[4] != 'as': raise TemplateSyntaxError( "next-to-last argument to 'regroup' tag must be 'as'" ) var_name = bits[5] # RegroupNode will take each item in 'target', put it in the context under # 'var_name', evaluate 'var_name'.'expression' in the current context, and # group by the resulting value. After all items are processed, it will # save the final result in the context under 'var_name', thus clearing the # temporary values. This hack is necessary because the template engine # doesn't provide a context-aware equivalent of Python's getattr. expression = parser.compile_filter(var_name + VARIABLE_ATTRIBUTE_SEPARATOR + bits[3]) return RegroupNode(target, expression, var_name) @register.tag ``` ###Assistant : Regroup a list of alike objects by a common attribute. This complex tag is best illustrated by use of an example: say that ``musicians`` is a list of ``Musician`` objects that have ``name`` and ``instrument`` attributes, and you'd like to display a list that looks like: * Guitar: * Django Reinhardt * Emily Remler * Piano: * Lovie Austin * Bud Powell * Trumpet: * Duke Ellington The following snippet of template code would accomplish this dubious task:: {% regroup musicians by instrument as grouped %} <ul> {% for group in grouped %} <li>{{ group.grouper }} <ul> {% for musician in group.list %} <li>{{ musician.name }}</li> {% endfor %} </ul> {% endfor %} </ul> As you can see, ``{% regroup %}`` populates a variable with a list of objects with ``grouper`` and ``list`` attributes. ``grouper`` contains the item that was grouped by; ``list`` contains the list of objects that share that ``grouper``. In this case, ``grouper`` would be ``Guitar``, ``Piano`` and ``Trumpet``, and ``list`` is the list of musicians who play this instrument. Note that ``{% regroup %}`` does not work when the list to be grouped is not sorted by the key you are grouping by! This means that if your list of musicians was not sorted by instrument, you'd need to make sure it is sorted before using it, i.e.:: {% regroup musicians|dictsort:"instrument" by instrument as grouped %}
823
def create_basic_list(cls) -> "Saved": metadata = cls.get_metadata("saved") urls = cls.get_urls("saved") return cls(**metadata, urls=urls, songs=[])
Create a basic list with only the required metadata and urls. ### Returns - The Saved object.
17
14
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def create_basic_list(cls) -> "Saved": metadata = cls.get_metadata("saved") urls = cls.get_urls("saved") return cls(**metadata, urls=urls, songs=[]) ``` ###Assistant : Create a basic list with only the required metadata and urls. ### Returns - The Saved object.
824
def assert_lists_same(a, b): assert len(a) == len(b) for i in a: assert i in b for i in b: assert i in a
Compare two lists, ignoring order. Check both that all items in a are in b and that all items in b are in a, otherwise assert_lists_same(["1", "1"], ["1", "2"]) could be True.
32
23
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def assert_lists_same(a, b): assert len(a) == len(b) for i in a: assert i in b for i in b: assert i in a ``` ###Assistant : Compare two lists, ignoring order. Check both that all items in a are in b and that all items in b are in a, otherwise assert_lists_same(["1", "1"], ["1", "2"]) could be True.
825
def test_dataframe_format_with_index(): pytest.importorskip("jinja2") df = pd.DataFrame( { "A": [1, 2, 3, 4, 5, 6, 7, 8], "B": list("ABCDEFGH"), "C": pd.Categorical(list("AAABBBCC")), }, index=list("ABCDEFGH"), ) ddf = dd.from_pandas(df, 3) exp = ( "Dask DataFrame Structure:\n" " A B C\n" "npartitions=3 \n" "A int64 object category[known]\n" "D ... ... ...\n" "G ... ... ...\n" "H ... ... ...\n" "Dask Name: from_pandas, 1 graph layer" ) assert repr(ddf) == exp assert str(ddf) == exp exp_table = exp = .format( exp_table=exp_table ) assert ddf.to_html() == exp # table is boxed with div and has style exp = .format( style=style, exp_table=exp_table ) assert ddf._repr_html_() == exp
<table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th></th> <th>A</th> <th>B</th> <th>C</th> </tr> <tr> <th>npartitions=3</th> <th></th> <th></th> <th></th> </tr> </thead> <tbody> <tr> <th>A</th> <td>int64</td> <td>object</td> <td>category[known]</td> </tr> <tr> <th>D</th> <td>...</td> <td>...</td> <td>...</td> </tr> <tr> <th>G</th> <td>...</td> <td>...</td> <td>...</td> </tr> <tr> <th>H</th> <td>...</td> <td>...</td> <td>...</td> </tr> </tbody> </table><div><strong>Dask DataFrame Structure:</strong></div> {exp_table} <div>Dask Name: from_pandas, 1 graph layer</div><div><strong>Dask DataFrame Structure:</strong></div> <div> {style}{exp_table} </div> <div>Dask Name: from_pandas, 1 graph layer</div>
66
100
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_dataframe_format_with_index(): pytest.importorskip("jinja2") df = pd.DataFrame( { "A": [1, 2, 3, 4, 5, 6, 7, 8], "B": list("ABCDEFGH"), "C": pd.Categorical(list("AAABBBCC")), }, index=list("ABCDEFGH"), ) ddf = dd.from_pandas(df, 3) exp = ( "Dask DataFrame Structure:\n" " A B C\n" "npartitions=3 \n" "A int64 object category[known]\n" "D ... ... ...\n" "G ... ... ...\n" "H ... ... ...\n" "Dask Name: from_pandas, 1 graph layer" ) assert repr(ddf) == exp assert str(ddf) == exp exp_table = exp = .format( exp_table=exp_table ) assert ddf.to_html() == exp # table is boxed with div and has style exp = .format( style=style, exp_table=exp_table ) assert ddf._repr_html_() == exp ``` ###Assistant : <table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th></th> <th>A</th> <th>B</th> <th>C</th> </tr> <tr> <th>npartitions=3</th> <th></th> <th></th> <th></th> </tr> </thead> <tbody> <tr> <th>A</th> <td>int64</td> <td>object</td> <td>category[known]</td> </tr> <tr> <th>D</th> <td>...</td> <td>...</td> <td>...</td> </tr> <tr> <th>G</th> <td>...</td> <td>...</td> <td>...</td> </tr> <tr> <th>H</th> <td>...</td> <td>...</td> <td>...</td> </tr> </tbody> </table><div><strong>Dask DataFrame Structure:</strong></div> {exp_table} <div>Dask Name: from_pandas, 1 graph layer</div><div><strong>Dask DataFrame Structure:</strong></div> <div> {style}{exp_table} </div> <div>Dask Name: from_pandas, 1 graph layer</div>
826
def test_run_from_argv_closes_connections(self): command = BaseCommand() command.check = lambda: [] command.handle = lambda *args, **kwargs: args with mock.patch("django.core.management.base.connections") as mock_connections: command.run_from_argv(["", ""]) # Test connections have been closed self.assertTrue(mock_connections.close_all.called)
A command called from the command line should close connections after being executed (#21255).
14
28
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_run_from_argv_closes_connections(self): command = BaseCommand() command.check = lambda: [] command.handle = lambda *args, **kwargs: args with mock.patch("django.core.management.base.connections") as mock_connections: command.run_from_argv(["", ""]) # Test connections have been closed self.assertTrue(mock_connections.close_all.called) ``` ###Assistant : A command called from the command line should close connections after being executed (#21255).
827
def test_version_managing(self, data_handler): # set up df = pd.DataFrame([ {'a': 1, 'b': dt.datetime(2020, 1, 1)}, {'a': 2, 'b': dt.datetime(2020, 1, 2)}, {'a': 1, 'b': dt.datetime(2020, 1, 3)}, ]) self.set_handler(data_handler, name='pg', tables={'tasks': df}) # ================= retrain cycles ===================== # create folder self.run_sql('create database proj') # -- create model -- self.run_sql( ) self.wait_predictor('proj', 'task_model') assert data_handler().native_query.call_args[0][0] == 'select * from tasks' # tag works in create model ret = self.run_sql('select * from proj.models') assert ret['TAG'][0] == 'first' # use model ret = self.run_sql() assert len(ret) == 3 assert ret.predicted[0] == 42 # -- retrain predictor with tag -- data_handler.reset_mock() self.run_sql( ) self.wait_predictor('proj', 'task_model', {'tag': 'second'}) # get current model ret = self.run_sql('select * from proj.models') # check target assert ret['PREDICT'][0] == 'b' # check label assert ret['TAG'][0] == 'second' # check integration sql assert data_handler().native_query.call_args[0][0] == 'select * from tasks where a=2' # use model ret = self.run_sql() assert ret.predicted[0] == 42 # used model has tag 'second' models = self.get_models() model_id = ret.predictor_id[0] assert models[model_id].label == 'second' # -- retrain again with active=0 -- data_handler.reset_mock() self.run_sql( ) self.wait_predictor('proj', 'task_model', {'tag': 'third'}) ret = self.run_sql('select * from proj.models') # check target is from previous retrain assert ret['PREDICT'][0] == 'b' # use model ret = self.run_sql() # used model has tag 'second' (previous) models = self.get_models() model_id = ret.predictor_id[0] assert models[model_id].label == 'second' # ================ working with inactive versions ================= # run 3st version model and check used model version ret = self.run_sql() models = self.get_models() model_id = ret.predictor_id[0] assert models[model_id].label == 'third' # one-line query model by version ret = self.run_sql('SELECT * from proj.task_model.3 where a=1 and b=2') model_id = ret.predictor_id[0] assert models[model_id].label == 'third' # not existing version with pytest.raises(Exception) as exc_info: self.run_sql( 'SELECT * from proj.task_model.4 where a=1 and b=2', ) assert 'does not exists' in str(exc_info.value) # ================== managing versions ========================= # show models command # Show models <from | in> <project> where <expr> ret = self.run_sql('Show models') assert len(ret) == 1 and ret['NAME'][0] == 'task_model' ret = self.run_sql('Show models from proj') assert len(ret) == 1 and ret['NAME'][0] == 'task_model' ret = self.run_sql('Show models in proj') assert len(ret) == 1 and ret['NAME'][0] == 'task_model' ret = self.run_sql("Show models where name='task_model'") assert len(ret) == 1 and ret['NAME'][0] == 'task_model' ret = self.run_sql("Show models from proj where name='xxx'") assert len(ret) == 0 # ---------------- # See all versions ret = self.run_sql('select * from proj.models_versions') # we have all tags in versions assert set(ret['TAG']) == {'first', 'second', 'third'} # Set active selected version self.run_sql() # get active version ret = self.run_sql('select * from proj.models_versions where active = 1') assert ret['TAG'][0] == 'first' # use active version ? 
# Delete specific version self.run_sql() # deleted version not in list ret = self.run_sql('select * from proj.models_versions') assert len(ret) == 2 assert 'second' not in ret['TAG'] # try to use deleted version with pytest.raises(Exception) as exc_info: self.run_sql( 'SELECT * from proj.task_model.2 where a=1', ) assert 'does not exists' in str(exc_info.value) # exception with deleting active version with pytest.raises(Exception) as exc_info: self.run_sql() assert 'is not found' in str(exc_info.value) # drop predictor and check model is deleted and no versions self.run_sql('drop predictor proj.task_model') ret = self.run_sql('select * from proj.models') assert len(ret) == 0 ret = self.run_sql('select * from proj.models_versions') assert len(ret) == 0
CREATE PREDICTOR proj.task_model from pg (select * from tasks) PREDICT a using engine='dummy_ml', tag = 'first' SELECT m.* FROM pg.tasks as t JOIN proj.task_model as m retrain proj.task_model from pg (select * from tasks where a=2) PREDICT b using tag = 'second' SELECT m.* FROM pg.tasks as t JOIN proj.task_model as m retrain proj.task_model from pg (select * from tasks where a=2) PREDICT a using tag='third', active=0 SELECT m.* FROM pg.tasks as t JOIN proj.task_model as m SELECT m.* FROM pg.tasks as t JOIN proj.task_model.3 as m update proj.models_versions set active=1 where version=1 and name='task_model' delete from proj.models_versions where version=2 and name='task_model' delete from proj.models_versions where version=3 and model='task_model'
109
536
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_version_managing(self, data_handler): # set up df = pd.DataFrame([ {'a': 1, 'b': dt.datetime(2020, 1, 1)}, {'a': 2, 'b': dt.datetime(2020, 1, 2)}, {'a': 1, 'b': dt.datetime(2020, 1, 3)}, ]) self.set_handler(data_handler, name='pg', tables={'tasks': df}) # ================= retrain cycles ===================== # create folder self.run_sql('create database proj') # -- create model -- self.run_sql( ) self.wait_predictor('proj', 'task_model') assert data_handler().native_query.call_args[0][0] == 'select * from tasks' # tag works in create model ret = self.run_sql('select * from proj.models') assert ret['TAG'][0] == 'first' # use model ret = self.run_sql() assert len(ret) == 3 assert ret.predicted[0] == 42 # -- retrain predictor with tag -- data_handler.reset_mock() self.run_sql( ) self.wait_predictor('proj', 'task_model', {'tag': 'second'}) # get current model ret = self.run_sql('select * from proj.models') # check target assert ret['PREDICT'][0] == 'b' # check label assert ret['TAG'][0] == 'second' # check integration sql assert data_handler().native_query.call_args[0][0] == 'select * from tasks where a=2' # use model ret = self.run_sql() assert ret.predicted[0] == 42 # used model has tag 'second' models = self.get_models() model_id = ret.predictor_id[0] assert models[model_id].label == 'second' # -- retrain again with active=0 -- data_handler.reset_mock() self.run_sql( ) self.wait_predictor('proj', 'task_model', {'tag': 'third'}) ret = self.run_sql('select * from proj.models') # check target is from previous retrain assert ret['PREDICT'][0] == 'b' # use model ret = self.run_sql() # used model has tag 'second' (previous) models = self.get_models() model_id = ret.predictor_id[0] assert models[model_id].label == 'second' # ================ working with inactive versions ================= # run 3st version model and check used model version ret = self.run_sql() models = self.get_models() model_id = ret.predictor_id[0] assert models[model_id].label == 'third' # one-line query model by version ret = self.run_sql('SELECT * from proj.task_model.3 where a=1 and b=2') model_id = ret.predictor_id[0] assert models[model_id].label == 'third' # not existing version with pytest.raises(Exception) as exc_info: self.run_sql( 'SELECT * from proj.task_model.4 where a=1 and b=2', ) assert 'does not exists' in str(exc_info.value) # ================== managing versions ========================= # show models command # Show models <from | in> <project> where <expr> ret = self.run_sql('Show models') assert len(ret) == 1 and ret['NAME'][0] == 'task_model' ret = self.run_sql('Show models from proj') assert len(ret) == 1 and ret['NAME'][0] == 'task_model' ret = self.run_sql('Show models in proj') assert len(ret) == 1 and ret['NAME'][0] == 'task_model' ret = self.run_sql("Show models where name='task_model'") assert len(ret) == 1 and ret['NAME'][0] == 'task_model' ret = self.run_sql("Show models from proj where name='xxx'") assert len(ret) == 0 # ---------------- # See all versions ret = self.run_sql('select * from proj.models_versions') # we have all tags in versions assert set(ret['TAG']) == {'first', 'second', 'third'} # Set active selected version self.run_sql() # get active version ret = self.run_sql('select * from proj.models_versions where active = 1') assert ret['TAG'][0] == 'first' # use active version ? 
# Delete specific version self.run_sql() # deleted version not in list ret = self.run_sql('select * from proj.models_versions') assert len(ret) == 2 assert 'second' not in ret['TAG'] # try to use deleted version with pytest.raises(Exception) as exc_info: self.run_sql( 'SELECT * from proj.task_model.2 where a=1', ) assert 'does not exists' in str(exc_info.value) # exception with deleting active version with pytest.raises(Exception) as exc_info: self.run_sql() assert 'is not found' in str(exc_info.value) # drop predictor and check model is deleted and no versions self.run_sql('drop predictor proj.task_model') ret = self.run_sql('select * from proj.models') assert len(ret) == 0 ret = self.run_sql('select * from proj.models_versions') assert len(ret) == 0 ``` ###Assistant : CREATE PREDICTOR proj.task_model from pg (select * from tasks) PREDICT a using engine='dummy_ml', tag = 'first' SELECT m.* FROM pg.tasks as t JOIN proj.task_model as m retrain proj.task_model from pg (select * from tasks where a=2) PREDICT b using tag = 'second' SELECT m.* FROM pg.tasks as t JOIN proj.task_model as m retrain proj.task_model from pg (select * from tasks where a=2) PREDICT a using tag='third', active=0 SELECT m.* FROM pg.tasks as t JOIN proj.task_model as m SELECT m.* FROM pg.tasks as t JOIN proj.task_model.3 as m update proj.models_versions set active=1 where version=1 and name='task_model' delete from proj.models_versions where version=2 and name='task_model' delete from proj.models_versions where version=3 and model='task_model'
828
def _looks_like_red_hat_lib() -> bool: from distutils.command.install import INSTALL_SCHEMES # type: ignore return all( k in INSTALL_SCHEMES and _looks_like_red_hat_patched_platlib_purelib(INSTALL_SCHEMES[k]) for k in ("unix_prefix", "unix_home") ) @functools.lru_cache(maxsize=None)
Red Hat patches platlib in unix_prefix and unix_home, but not purelib. This is the only way I can see to tell a Red Hat-patched Python.
25
25
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _looks_like_red_hat_lib() -> bool: from distutils.command.install import INSTALL_SCHEMES # type: ignore return all( k in INSTALL_SCHEMES and _looks_like_red_hat_patched_platlib_purelib(INSTALL_SCHEMES[k]) for k in ("unix_prefix", "unix_home") ) @functools.lru_cache(maxsize=None) ``` ###Assistant : Red Hat patches platlib in unix_prefix and unix_home, but not purelib. This is the only way I can see to tell a Red Hat-patched Python.
829
def get_valid_parent_pages(self, user): # Get queryset of pages where this page type can be added allowed_parent_page_content_types = list( ContentType.objects.get_for_models( *self.model.allowed_parent_page_models() ).values() ) allowed_parent_pages = Page.objects.filter( content_type__in=allowed_parent_page_content_types ) # Get queryset of pages where the user has permission to add subpages if user.is_superuser: pages_where_user_can_add = Page.objects.all() else: pages_where_user_can_add = Page.objects.none() user_perms = UserPagePermissionsProxy(user) for perm in user_perms.permissions.filter(permission_type="add"): # user has add permission on any subpage of perm.page # (including perm.page itself) pages_where_user_can_add |= Page.objects.descendant_of( perm.page, inclusive=True ) # Combine them return allowed_parent_pages & pages_where_user_can_add
Identifies possible parent pages for the current user by first looking at allowed_parent_page_models() on self.model to limit options to the correct type of page, then checking permissions on those individual pages to make sure we have permission to add a subpage to it.
43
83
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_valid_parent_pages(self, user): # Get queryset of pages where this page type can be added allowed_parent_page_content_types = list( ContentType.objects.get_for_models( *self.model.allowed_parent_page_models() ).values() ) allowed_parent_pages = Page.objects.filter( content_type__in=allowed_parent_page_content_types ) # Get queryset of pages where the user has permission to add subpages if user.is_superuser: pages_where_user_can_add = Page.objects.all() else: pages_where_user_can_add = Page.objects.none() user_perms = UserPagePermissionsProxy(user) for perm in user_perms.permissions.filter(permission_type="add"): # user has add permission on any subpage of perm.page # (including perm.page itself) pages_where_user_can_add |= Page.objects.descendant_of( perm.page, inclusive=True ) # Combine them return allowed_parent_pages & pages_where_user_can_add ``` ###Assistant : Identifies possible parent pages for the current user by first looking at allowed_parent_page_models() on self.model to limit options to the correct type of page, then checking permissions on those individual pages to make sure we have permission to add a subpage to it.
830
def get_attendance_list(conditions, filters): attendance_list = frappe.db.sql( % conditions, filters, as_dict=1, ) if not attendance_list: msgprint(_("No attendance record found"), alert=True, indicator="orange") att_map = {} for d in attendance_list: att_map.setdefault(d.employee, frappe._dict()).setdefault(d.day_of_month, "") att_map[d.employee][d.day_of_month] = d.status return att_map
select employee, day(attendance_date) as day_of_month, status from tabAttendance where docstatus = 1 %s order by employee, attendance_date
17
35
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_attendance_list(conditions, filters): attendance_list = frappe.db.sql( % conditions, filters, as_dict=1, ) if not attendance_list: msgprint(_("No attendance record found"), alert=True, indicator="orange") att_map = {} for d in attendance_list: att_map.setdefault(d.employee, frappe._dict()).setdefault(d.day_of_month, "") att_map[d.employee][d.day_of_month] = d.status return att_map ``` ###Assistant : select employee, day(attendance_date) as day_of_month, status from tabAttendance where docstatus = 1 %s order by employee, attendance_date
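The core of the record above is a setdefault-based pivot from flat query rows into a nested employee → day → status map. A minimal, frappe-free sketch of that idea (the row values are made up for illustration):

```Python
# Hypothetical rows standing in for the SQL result in the record above.
rows = [
    {"employee": "EMP-0001", "day_of_month": 1, "status": "Present"},
    {"employee": "EMP-0001", "day_of_month": 2, "status": "Absent"},
    {"employee": "EMP-0002", "day_of_month": 1, "status": "Present"},
]

att_map = {}
for d in rows:
    # setdefault creates the per-employee dict on first sight, then we key by day.
    att_map.setdefault(d["employee"], {})[d["day_of_month"]] = d["status"]

print(att_map)
# {'EMP-0001': {1: 'Present', 2: 'Absent'}, 'EMP-0002': {1: 'Present'}}
```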
831
def _reorder_cache(past, beam_idx): reordered_past = () for layer_past in past: reordered_past += (tuple(tf.gather(past_state, beam_idx, axis=0) for past_state in layer_past),) return reordered_past @add_start_docstrings( , REMBERT_START_DOCSTRING, )
RemBERT Model transformer with a sequence classification/regression head on top e.g., for GLUE tasks.
14
25
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _reorder_cache(past, beam_idx): reordered_past = () for layer_past in past: reordered_past += (tuple(tf.gather(past_state, beam_idx, axis=0) for past_state in layer_past),) return reordered_past @add_start_docstrings( , REMBERT_START_DOCSTRING, ) ``` ###Assistant : RemBERT Model transformer with a sequence classification/regression head on top e.g., for GLUE tasks.
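The cache reordering in the record above is essentially `tf.gather` along the batch axis with the beam indices. A small sketch, assuming TensorFlow is installed; the tensor shapes and values are arbitrary:

```Python
import tensorflow as tf

# A fake "past state" with batch size 3 (real caches have shape (batch, heads, seq, dim)).
past_state = tf.reshape(tf.range(12), (3, 2, 2))
beam_idx = tf.constant([2, 0, 1])  # new order of the batch entries

# Reorder the batch dimension to follow the selected beams.
reordered = tf.gather(past_state, beam_idx, axis=0)
print(reordered.shape)  # (3, 2, 2); row 0 is now the old row 2, and so on
```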
832
def test_remove_as_admin_not_in_team(self): # an org with closed membership (byproduct of flags=0) org = self.create_organization(owner=self.user, flags=0) team = self.create_team(organization=org) admin_user = self.create_user(email="foo@example.com", is_superuser=False) self.create_member( organization=org, user=admin_user, role="admin", teams=[], # note that admin_user isn't a member of `team` ) self.login_as(admin_user) # first, try deleting the team with open membership off self.get_error_response(team.organization.slug, team.slug, status_code=403) self.assert_team_not_deleted(team.id) # now, with open membership on org.flags.allow_joinleave = True org.save() self.get_success_response(team.organization.slug, team.slug, status_code=204) self.assert_team_deleted(team.id)
Admins can't remove teams of which they're not a part, unless open membership is on.
15
66
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_remove_as_admin_not_in_team(self): # an org with closed membership (byproduct of flags=0) org = self.create_organization(owner=self.user, flags=0) team = self.create_team(organization=org) admin_user = self.create_user(email="foo@example.com", is_superuser=False) self.create_member( organization=org, user=admin_user, role="admin", teams=[], # note that admin_user isn't a member of `team` ) self.login_as(admin_user) # first, try deleting the team with open membership off self.get_error_response(team.organization.slug, team.slug, status_code=403) self.assert_team_not_deleted(team.id) # now, with open membership on org.flags.allow_joinleave = True org.save() self.get_success_response(team.organization.slug, team.slug, status_code=204) self.assert_team_deleted(team.id) ``` ###Assistant : Admins can't remove teams of which they're not a part, unless open membership is on.
833
def batch_test(num_threads, delay): with mock.patch( "ray.autoscaler._private.aws.node_provider.make_ec2_client" ), mock.patch.object(AWSNodeProvider, "_create_tags", mock_create_tags): provider = AWSNodeProvider( provider_config={"region": "nowhere"}, cluster_name="default" ) provider.batch_counter = 0 provider.tag_update_counter = 0 provider.tag_cache = {str(x): {} for x in range(num_threads)} threads = [] for x in range(num_threads): thread = threading.Thread( target=provider.set_node_tags, args=(str(x), {"foo": "bar"}) ) threads.append(thread) for thread in threads: thread.start() time.sleep(delay) for thread in threads: thread.join() return provider.batch_counter, provider.tag_update_counter
Run AWSNodeProvider.set_node_tags in several threads, with a specified delay between thread launches. Return the number of batches of tag updates and the number of tags updated.
26
61
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def batch_test(num_threads, delay): with mock.patch( "ray.autoscaler._private.aws.node_provider.make_ec2_client" ), mock.patch.object(AWSNodeProvider, "_create_tags", mock_create_tags): provider = AWSNodeProvider( provider_config={"region": "nowhere"}, cluster_name="default" ) provider.batch_counter = 0 provider.tag_update_counter = 0 provider.tag_cache = {str(x): {} for x in range(num_threads)} threads = [] for x in range(num_threads): thread = threading.Thread( target=provider.set_node_tags, args=(str(x), {"foo": "bar"}) ) threads.append(thread) for thread in threads: thread.start() time.sleep(delay) for thread in threads: thread.join() return provider.batch_counter, provider.tag_update_counter ``` ###Assistant : Run AWSNodeProvider.set_node_tags in several threads, with a specified delay between thread launches. Return the number of batches of tag updates and the number of tags updated.
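The test above relies on the usual start/sleep/join pattern for launching threads with a delay between them. A dependency-free sketch of that pattern (the worker and counter are illustrative only, not the AWS provider from the record):

```Python
import threading
import time

counter = {"calls": 0}
lock = threading.Lock()

def work(tag):
    # Each thread records one "tag update", guarded by a lock.
    with lock:
        counter["calls"] += 1

threads = [threading.Thread(target=work, args=(str(i),)) for i in range(4)]
for t in threads:
    t.start()
    time.sleep(0.05)  # stagger the launches, like the test's `delay` argument
for t in threads:
    t.join()

print(counter["calls"])  # 4
```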
834
def forward(self, feats, img_metas): y = self.last_feat_conv(feats[-1]) for i in range(self.num_inputs - 2, -1, -1): x = feats[i] cur_fpn = self.lateral_convs[i](x) y = cur_fpn + \ F.interpolate(y, size=cur_fpn.shape[-2:], mode='nearest') y = self.output_convs[i](y) mask_feature = self.mask_feature(y) memory = feats[-1] return mask_feature, memory @PLUGIN_LAYERS.register_module()
Args: feats (list[Tensor]): Feature maps of each level. Each has shape of (batch_size, c, h, w). img_metas (list[dict]): List of image information. Pass in for creating more accurate padding mask. Not used here. Returns: tuple: a tuple containing the following: - mask_feature (Tensor): Shape (batch_size, c, h, w). - memory (Tensor): Output of last stage of backbone. Shape (batch_size, c, h, w).
62
42
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def forward(self, feats, img_metas): y = self.last_feat_conv(feats[-1]) for i in range(self.num_inputs - 2, -1, -1): x = feats[i] cur_fpn = self.lateral_convs[i](x) y = cur_fpn + \ F.interpolate(y, size=cur_fpn.shape[-2:], mode='nearest') y = self.output_convs[i](y) mask_feature = self.mask_feature(y) memory = feats[-1] return mask_feature, memory @PLUGIN_LAYERS.register_module() ``` ###Assistant : Args: feats (list[Tensor]): Feature maps of each level. Each has shape of (batch_size, c, h, w). img_metas (list[dict]): List of image information. Pass in for creating more accurate padding mask. Not used here. Returns: tuple: a tuple containing the following: - mask_feature (Tensor): Shape (batch_size, c, h, w). - memory (Tensor): Output of last stage of backbone. Shape (batch_size, c, h, w).
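The decoder in the record above fuses a lateral feature with a nearest-neighbour upsampled coarser feature, FPN-style. A minimal sketch of that fusion step with random tensors, assuming PyTorch is installed:

```Python
import torch
import torch.nn.functional as F

c_fine = torch.randn(1, 8, 16, 16)   # lateral feature (batch, c, h, w)
c_coarse = torch.randn(1, 8, 8, 8)   # coarser feature from the previous stage

# Upsample the coarse map to the lateral map's spatial size and add them.
fused = c_fine + F.interpolate(c_coarse, size=c_fine.shape[-2:], mode="nearest")
print(fused.shape)  # torch.Size([1, 8, 16, 16])
```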
835
async def test_remote_scanner_expires_non_connectable(hass): manager = _get_manager() switchbot_device = BLEDevice( "44:44:33:11:23:45", "wohand", {}, rssi=-100, ) switchbot_device_adv = generate_advertisement_data( local_name="wohand", service_uuids=[], manufacturer_data={1: b"\x01"}, rssi=-100, )
Test the remote scanner expires stale non connectable data.
9
23
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_remote_scanner_expires_non_connectable(hass): manager = _get_manager() switchbot_device = BLEDevice( "44:44:33:11:23:45", "wohand", {}, rssi=-100, ) switchbot_device_adv = generate_advertisement_data( local_name="wohand", service_uuids=[], manufacturer_data={1: b"\x01"}, rssi=-100, ) ``` ###Assistant : Test the remote scanner expires stale non connectable data.
836
def convert_yaml_objects_to_native(obj): if isinstance(obj, dict): return dict((k, convert_yaml_objects_to_native(v)) for k, v in obj.items()) elif isinstance(obj, list): return [convert_yaml_objects_to_native(v) for v in obj] elif isinstance(obj, text_type): return text_type(obj) else: return obj
Older versions of the ``toml`` python library, and tomllib, don't have a pluggable way to tell the encoder about custom types, so we need to ensure objects that we pass are native types. Used with: - ``toml<0.10.0`` where ``toml.TomlEncoder`` is missing - ``tomli`` or ``tomllib`` This function recurses an object and ensures we cast any of the types from ``ansible.parsing.yaml.objects`` into their native types, effectively cleansing the data before we hand it over to the toml library. This function doesn't directly check for the types from ``ansible.parsing.yaml.objects`` but instead checks for the types those objects inherit from, to offer more flexibility.
101
30
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def convert_yaml_objects_to_native(obj): if isinstance(obj, dict): return dict((k, convert_yaml_objects_to_native(v)) for k, v in obj.items()) elif isinstance(obj, list): return [convert_yaml_objects_to_native(v) for v in obj] elif isinstance(obj, text_type): return text_type(obj) else: return obj ``` ###Assistant : Older versions of the ``toml`` python library, and tomllib, don't have a pluggable way to tell the encoder about custom types, so we need to ensure objects that we pass are native types. Used with: - ``toml<0.10.0`` where ``toml.TomlEncoder`` is missing - ``tomli`` or ``tomllib`` This function recurses an object and ensures we cast any of the types from ``ansible.parsing.yaml.objects`` into their native types, effectively cleansing the data before we hand it over to the toml library. This function doesn't directly check for the types from ``ansible.parsing.yaml.objects`` but instead checks for the types those objects inherit from, to offer more flexibility.
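The recursion above is easy to exercise without Ansible's YAML object types; any str subclass can stand in for them. A small self-contained sketch of the same cleansing idea:

```Python
class TaggedStr(str):
    """Stand-in for an Ansible YAML string subclass."""

def to_native(obj):
    # Same shape of recursion as convert_yaml_objects_to_native, using plain str.
    if isinstance(obj, dict):
        return {k: to_native(v) for k, v in obj.items()}
    if isinstance(obj, list):
        return [to_native(v) for v in obj]
    if isinstance(obj, str):
        return str(obj)  # str() of a str subclass yields an exact str instance
    return obj

data = {"name": TaggedStr("demo"), "items": [TaggedStr("a"), 1]}
clean = to_native(data)
print(type(clean["name"]) is str, type(clean["items"][0]) is str)  # True True
```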
837
def make_future_dataframe(self, periods, freq='D', include_history=True): if self.history_dates is None: raise Exception('Model has not been fit.') if freq is None: # taking the tail makes freq inference more reliable freq = pd.infer_freq(self.history_dates.tail(5)) # returns None if inference failed if freq is None: raise Exception('Unable to infer `freq`') last_date = self.history_dates.max() dates = pd.date_range( start=last_date, periods=periods + 1, # An extra in case we include start freq=freq) dates = dates[dates > last_date] # Drop start if equals last_date dates = dates[:periods] # Return correct number of periods if include_history: dates = np.concatenate((np.array(self.history_dates), dates)) return pd.DataFrame({'ds': dates})
Simulate the trend using the extrapolated generative model. Parameters ---------- periods: Int number of periods to forecast forward. freq: Any valid frequency for pd.date_range, such as 'D' or 'M'. include_history: Boolean to include the historical dates in the data frame for predictions. Returns ------- pd.Dataframe that extends forward from the end of self.history for the requested number of periods.
59
94
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def make_future_dataframe(self, periods, freq='D', include_history=True): if self.history_dates is None: raise Exception('Model has not been fit.') if freq is None: # taking the tail makes freq inference more reliable freq = pd.infer_freq(self.history_dates.tail(5)) # returns None if inference failed if freq is None: raise Exception('Unable to infer `freq`') last_date = self.history_dates.max() dates = pd.date_range( start=last_date, periods=periods + 1, # An extra in case we include start freq=freq) dates = dates[dates > last_date] # Drop start if equals last_date dates = dates[:periods] # Return correct number of periods if include_history: dates = np.concatenate((np.array(self.history_dates), dates)) return pd.DataFrame({'ds': dates}) ``` ###Assistant : Simulate the trend using the extrapolated generative model. Parameters ---------- periods: Int number of periods to forecast forward. freq: Any valid frequency for pd.date_range, such as 'D' or 'M'. include_history: Boolean to include the historical dates in the data frame for predictions. Returns ------- pd.Dataframe that extends forward from the end of self.history for the requested number of periods.
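The date arithmetic in the record above — extend from the last history date, drop the start, optionally prepend history — can be reproduced with plain pandas. A short sketch with made-up history dates:

```Python
import pandas as pd

history_dates = pd.to_datetime(["2020-01-01", "2020-01-02", "2020-01-03"])
periods, freq = 4, "D"

last_date = history_dates.max()
dates = pd.date_range(start=last_date, periods=periods + 1, freq=freq)  # the extra one covers the start
dates = dates[dates > last_date][:periods]  # drop last_date itself, keep `periods` entries

future = pd.DataFrame({"ds": history_dates.append(dates)})  # include_history=True behaviour
print(future["ds"].tolist()[-2:])  # ends at 2020-01-07
```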
838
def array_to_blobproto(arr, diff=None): blob = caffe_pb2.BlobProto() blob.shape.dim.extend(arr.shape) blob.data.extend(arr.astype(float).flat) if diff is not None: blob.diff.extend(diff.astype(float).flat) return blob
Converts an N-dimensional array to blob proto. If diff is given, also converts the diff. You need to make sure that arr and diff have the same shape, and this function does not do a sanity check.
36
16
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def array_to_blobproto(arr, diff=None): blob = caffe_pb2.BlobProto() blob.shape.dim.extend(arr.shape) blob.data.extend(arr.astype(float).flat) if diff is not None: blob.diff.extend(diff.astype(float).flat) return blob ``` ###Assistant : Converts an N-dimensional array to blob proto. If diff is given, also converts the diff. You need to make sure that arr and diff have the same shape, and this function does not do a sanity check.
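Without Caffe installed, the essence of the record above — store the shape plus the flattened float data, and optionally a same-shaped diff — can be sketched with NumPy alone. The dict layout here is illustrative, not the Caffe protobuf API:

```Python
import numpy as np

def array_to_blob_dict(arr, diff=None):
    # Mirror of the BlobProto layout: shape dims + flat float data (+ optional flat diff).
    blob = {"shape": list(arr.shape), "data": arr.astype(float).ravel().tolist()}
    if diff is not None:
        blob["diff"] = diff.astype(float).ravel().tolist()
    return blob

arr = np.arange(6, dtype=np.float32).reshape(2, 3)
blob = array_to_blob_dict(arr, diff=np.zeros_like(arr))
restored = np.array(blob["data"]).reshape(blob["shape"])
print(np.array_equal(restored, arr))  # True
```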
839
def test_cross_signing_keys_retry(self): remote_user_id = "@john:test_remote" remote_master_key = "85T7JXPFBAySB/jwby4S3lBPTqY3+Zg53nYuGmu1ggY" remote_self_signing_key = "QeIiFEjluPBtI7WQdG365QKZcFs9kqmHir6RBD0//nQ" # Register mock device list retrieval on the federation client. federation_client = self.homeserver.get_federation_client() federation_client.query_user_devices = Mock( return_value=make_awaitable( { "user_id": remote_user_id, "stream_id": 1, "devices": [], "master_key": { "user_id": remote_user_id, "usage": ["master"], "keys": {"ed25519:" + remote_master_key: remote_master_key}, }, "self_signing_key": { "user_id": remote_user_id, "usage": ["self_signing"], "keys": { "ed25519:" + remote_self_signing_key: remote_self_signing_key }, }, } ) ) # Resync the device list. device_handler = self.homeserver.get_device_handler() self.get_success( device_handler.device_list_updater.user_device_resync(remote_user_id), ) # Retrieve the cross-signing keys for this user. keys = self.get_success( self.store.get_e2e_cross_signing_keys_bulk(user_ids=[remote_user_id]), ) self.assertTrue(remote_user_id in keys) # Check that the master key is the one returned by the mock. master_key = keys[remote_user_id]["master"] self.assertEqual(len(master_key["keys"]), 1) self.assertTrue("ed25519:" + remote_master_key in master_key["keys"].keys()) self.assertTrue(remote_master_key in master_key["keys"].values()) # Check that the self-signing key is the one returned by the mock. self_signing_key = keys[remote_user_id]["self_signing"] self.assertEqual(len(self_signing_key["keys"]), 1) self.assertTrue( "ed25519:" + remote_self_signing_key in self_signing_key["keys"].keys(), ) self.assertTrue(remote_self_signing_key in self_signing_key["keys"].values())
Tests that resyncing a device list correctly processes cross-signing keys from the remote server.
14
145
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_cross_signing_keys_retry(self): remote_user_id = "@john:test_remote" remote_master_key = "85T7JXPFBAySB/jwby4S3lBPTqY3+Zg53nYuGmu1ggY" remote_self_signing_key = "QeIiFEjluPBtI7WQdG365QKZcFs9kqmHir6RBD0//nQ" # Register mock device list retrieval on the federation client. federation_client = self.homeserver.get_federation_client() federation_client.query_user_devices = Mock( return_value=make_awaitable( { "user_id": remote_user_id, "stream_id": 1, "devices": [], "master_key": { "user_id": remote_user_id, "usage": ["master"], "keys": {"ed25519:" + remote_master_key: remote_master_key}, }, "self_signing_key": { "user_id": remote_user_id, "usage": ["self_signing"], "keys": { "ed25519:" + remote_self_signing_key: remote_self_signing_key }, }, } ) ) # Resync the device list. device_handler = self.homeserver.get_device_handler() self.get_success( device_handler.device_list_updater.user_device_resync(remote_user_id), ) # Retrieve the cross-signing keys for this user. keys = self.get_success( self.store.get_e2e_cross_signing_keys_bulk(user_ids=[remote_user_id]), ) self.assertTrue(remote_user_id in keys) # Check that the master key is the one returned by the mock. master_key = keys[remote_user_id]["master"] self.assertEqual(len(master_key["keys"]), 1) self.assertTrue("ed25519:" + remote_master_key in master_key["keys"].keys()) self.assertTrue(remote_master_key in master_key["keys"].values()) # Check that the self-signing key is the one returned by the mock. self_signing_key = keys[remote_user_id]["self_signing"] self.assertEqual(len(self_signing_key["keys"]), 1) self.assertTrue( "ed25519:" + remote_self_signing_key in self_signing_key["keys"].keys(), ) self.assertTrue(remote_self_signing_key in self_signing_key["keys"].values()) ``` ###Assistant : Tests that resyncing a device list correctly processes cross-signing keys from the remote server.
840
def test_exec_success(self, db_mock_class): run = { 'new_cluster': NEW_CLUSTER, 'notebook_task': NOTEBOOK_TASK, } op = DatabricksSubmitRunOperator(task_id=TASK_ID, json=run) db_mock = db_mock_class.return_value db_mock.submit_run.return_value = 1 db_mock.get_run_state.return_value = RunState('TERMINATED', 'SUCCESS', '') op.execute(None) expected = databricks_operator._deep_string_coerce( {'new_cluster': NEW_CLUSTER, 'notebook_task': NOTEBOOK_TASK, 'run_name': TASK_ID} ) db_mock_class.assert_called_once_with( DEFAULT_CONN_ID, retry_limit=op.databricks_retry_limit, retry_delay=op.databricks_retry_delay, retry_args=None, ) db_mock.submit_run.assert_called_once_with(expected) db_mock.get_run_page_url.assert_called_once_with(RUN_ID) db_mock.get_run_state.assert_called_once_with(RUN_ID) assert RUN_ID == op.run_id
Test the execute function in case where the run is successful.
11
50
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_exec_success(self, db_mock_class): run = { 'new_cluster': NEW_CLUSTER, 'notebook_task': NOTEBOOK_TASK, } op = DatabricksSubmitRunOperator(task_id=TASK_ID, json=run) db_mock = db_mock_class.return_value db_mock.submit_run.return_value = 1 db_mock.get_run_state.return_value = RunState('TERMINATED', 'SUCCESS', '') op.execute(None) expected = databricks_operator._deep_string_coerce( {'new_cluster': NEW_CLUSTER, 'notebook_task': NOTEBOOK_TASK, 'run_name': TASK_ID} ) db_mock_class.assert_called_once_with( DEFAULT_CONN_ID, retry_limit=op.databricks_retry_limit, retry_delay=op.databricks_retry_delay, retry_args=None, ) db_mock.submit_run.assert_called_once_with(expected) db_mock.get_run_page_url.assert_called_once_with(RUN_ID) db_mock.get_run_state.assert_called_once_with(RUN_ID) assert RUN_ID == op.run_id ``` ###Assistant : Test the execute function in case where the run is successful.
841
def dark_palette(color, n_colors=6, reverse=False, as_cmap=False, input="rgb"): rgb = _color_to_rgb(color, input) h, s, l = husl.rgb_to_husl(*rgb) gray_s, gray_l = .15 * s, 15 gray = _color_to_rgb((h, gray_s, gray_l), input="husl") colors = [rgb, gray] if reverse else [gray, rgb] return blend_palette(colors, n_colors, as_cmap)
Make a sequential palette that blends from dark to ``color``. This kind of palette is good for data that range between relatively uninteresting low values and interesting high values. The ``color`` parameter can be specified in a number of ways, including all options for defining a color in matplotlib and several additional color spaces that are handled by seaborn. You can also use the database of named colors from the XKCD color survey. If you are using the IPython notebook, you can also choose this palette interactively with the :func:`choose_dark_palette` function. Parameters ---------- color : base color for high values hex, rgb-tuple, or html color name n_colors : int, optional number of colors in the palette reverse : bool, optional if True, reverse the direction of the blend as_cmap : bool, optional If True, return a :class:`matplotlib.colors.ListedColormap`. input : {'rgb', 'hls', 'husl', xkcd'} Color space to interpret the input color. The first three options apply to tuple inputs and the latter applies to string inputs. Returns ------- palette list of RGB tuples or :class:`matplotlib.colors.ListedColormap` See Also -------- light_palette : Create a sequential palette with bright low values. diverging_palette : Create a diverging palette with two colors. Examples -------- .. include:: ../docstrings/dark_palette.rst
201
41
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def dark_palette(color, n_colors=6, reverse=False, as_cmap=False, input="rgb"): rgb = _color_to_rgb(color, input) h, s, l = husl.rgb_to_husl(*rgb) gray_s, gray_l = .15 * s, 15 gray = _color_to_rgb((h, gray_s, gray_l), input="husl") colors = [rgb, gray] if reverse else [gray, rgb] return blend_palette(colors, n_colors, as_cmap) ``` ###Assistant : Make a sequential palette that blends from dark to ``color``. This kind of palette is good for data that range between relatively uninteresting low values and interesting high values. The ``color`` parameter can be specified in a number of ways, including all options for defining a color in matplotlib and several additional color spaces that are handled by seaborn. You can also use the database of named colors from the XKCD color survey. If you are using the IPython notebook, you can also choose this palette interactively with the :func:`choose_dark_palette` function. Parameters ---------- color : base color for high values hex, rgb-tuple, or html color name n_colors : int, optional number of colors in the palette reverse : bool, optional if True, reverse the direction of the blend as_cmap : bool, optional If True, return a :class:`matplotlib.colors.ListedColormap`. input : {'rgb', 'hls', 'husl', xkcd'} Color space to interpret the input color. The first three options apply to tuple inputs and the latter applies to string inputs. Returns ------- palette list of RGB tuples or :class:`matplotlib.colors.ListedColormap` See Also -------- light_palette : Create a sequential palette with bright low values. diverging_palette : Create a diverging palette with two colors. Examples -------- .. include:: ../docstrings/dark_palette.rst
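A short usage sketch for the record above, assuming seaborn and matplotlib are installed; the color names and sizes are arbitrary:

```Python
import seaborn as sns

# Six colors blending from a dark gray up to seagreen.
pal = sns.dark_palette("seagreen", n_colors=6)
print(len(pal), pal[0])  # 6, an (r, g, b) tuple near the dark end

# As a colormap object instead of a list of tuples.
cmap = sns.dark_palette("#69d", as_cmap=True)
```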
842
def contour(self, X, win=None, env=None, opts=None): return self._surface(X=X, stype="contour", opts=opts, win=win, env=env)
This function draws a contour plot. It takes as input an `NxM` tensor `X` that specifies the value at each location in the contour plot. The following `opts` are supported: - `opts.colormap`: colormap (`string`; default = `'Viridis'`) - `opts.xmin` : clip minimum value (`number`; default = `X:min()`) - `opts.xmax` : clip maximum value (`number`; default = `X:max()`)
57
12
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def contour(self, X, win=None, env=None, opts=None): return self._surface(X=X, stype="contour", opts=opts, win=win, env=env) ``` ###Assistant : This function draws a contour plot. It takes as input an `NxM` tensor `X` that specifies the value at each location in the contour plot. The following `opts` are supported: - `opts.colormap`: colormap (`string`; default = `'Viridis'`) - `opts.xmin` : clip minimum value (`number`; default = `X:min()`) - `opts.xmax` : clip maximum value (`number`; default = `X:max()`)
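A usage sketch for the record above; it assumes the `visdom` package is installed and a visdom server is already running locally, otherwise the connection fails:

```Python
import numpy as np
import visdom

viz = visdom.Visdom()  # connects to http://localhost:8097 by default

# A simple 2-D surface to contour.
x = np.linspace(-3, 3, 100)
X = np.outer(np.sin(x), np.cos(x))

viz.contour(X=X, opts=dict(colormap="Viridis", title="demo contour"))
```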
843
def get_themes(): themes = {} eps = set(importlib_metadata.entry_points(group='mkdocs.themes')) builtins = {ep.name for ep in eps if ep.dist.name == 'mkdocs'} for theme in eps: if theme.name in builtins and theme.dist.name != 'mkdocs': raise exceptions.ConfigurationError( f"The theme '{theme.name}' is a builtin theme but the package '{theme.dist.name}' " "attempts to provide a theme with the same name." ) elif theme.name in themes: log.warning( f"A theme named '{theme.name}' is provided by the Python packages '{theme.dist.name}' " f"and '{themes[theme.name].dist.name}'. The one in '{theme.dist.name}' will be used." ) themes[theme.name] = theme return themes
Return a dict of all installed themes as {name: EntryPoint}.
10
87
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_themes(): themes = {} eps = set(importlib_metadata.entry_points(group='mkdocs.themes')) builtins = {ep.name for ep in eps if ep.dist.name == 'mkdocs'} for theme in eps: if theme.name in builtins and theme.dist.name != 'mkdocs': raise exceptions.ConfigurationError( f"The theme '{theme.name}' is a builtin theme but the package '{theme.dist.name}' " "attempts to provide a theme with the same name." ) elif theme.name in themes: log.warning( f"A theme named '{theme.name}' is provided by the Python packages '{theme.dist.name}' " f"and '{themes[theme.name].dist.name}'. The one in '{theme.dist.name}' will be used." ) themes[theme.name] = theme return themes ``` ###Assistant : Return a dict of all installed themes as {name: EntryPoint}.
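On Python 3.10+ (or with the `importlib_metadata` backport the record imports), entry points for a group can be listed directly. A small sketch with the stdlib module; the group name is taken from the record above:

```Python
from importlib import metadata

# Entry points registered under the 'mkdocs.themes' group by installed packages.
eps = metadata.entry_points(group="mkdocs.themes")
for ep in eps:
    # ep.dist is the providing distribution (available on recent Pythons / the backport).
    print(ep.name, getattr(ep.dist, "name", "<unknown dist>"))
```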
844
def test_subscribe_by_default(self): NotificationSetting.objects.update_settings( ExternalProviders.EMAIL, NotificationSettingTypes.ISSUE_ALERTS, NotificationSettingOptionValues.NEVER, user=self.user, ) response = self.get_success_response("me") assert response.data.get("subscribeByDefault") is False
Test that we expect project-independent issue alert preferences to be returned as `subscribe_by_default`.
13
15
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_subscribe_by_default(self): NotificationSetting.objects.update_settings( ExternalProviders.EMAIL, NotificationSettingTypes.ISSUE_ALERTS, NotificationSettingOptionValues.NEVER, user=self.user, ) response = self.get_success_response("me") assert response.data.get("subscribeByDefault") is False ``` ###Assistant : Test that we expect project-independent issue alert preferences to be returned as `subscribe_by_default`.
845
def _get_users_with_any_permission_codenames_filter(self, permission_codenames): permissions = Permission.objects.filter( content_type=self._content_type, codename__in=permission_codenames ) return ( Q(is_superuser=True) | Q(user_permissions__in=permissions) | Q(groups__permissions__in=permissions) ) & Q(is_active=True)
Given a list of permission codenames, return a filter expression which will find all users which have any of those permissions - either through group permissions, user permissions, or implicitly through being a superuser.
34
19
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_users_with_any_permission_codenames_filter(self, permission_codenames): permissions = Permission.objects.filter( content_type=self._content_type, codename__in=permission_codenames ) return ( Q(is_superuser=True) | Q(user_permissions__in=permissions) | Q(groups__permissions__in=permissions) ) & Q(is_active=True) ``` ###Assistant : Given a list of permission codenames, return a filter expression which will find all users which have any of those permissions - either through group permissions, user permissions, or implicitly through being a superuser.
846
def _async_update_rssi_and_transients(self) -> None: for ( unique_id, ibeacon_advertisement, ) in self._last_ibeacon_advertisement_by_unique_id.items(): address = unique_id.split("_")[-1] service_info = bluetooth.async_last_service_info( self.hass, address, connectable=False ) if not service_info: continue if address in self._transient_seen_count: self._transient_seen_count[address] += 1 if self._transient_seen_count[address] == MIN_SEEN_TRANSIENT_NEW: self._transient_seen_count.pop(address) _async_dispatch_update( self.hass, unique_id, service_info, ibeacon_advertisement, True, True, ) continue if service_info.rssi != ibeacon_advertisement.rssi: ibeacon_advertisement.update_rssi(service_info.rssi) async_dispatcher_send( self.hass, signal_seen(unique_id), ibeacon_advertisement, )
Check to see if the rssi has changed and update any devices. We don't get callbacks on RSSI changes so we need to check them here and send them over the dispatcher periodically to ensure the distance calculation is updated. If the transient flag is set we also need to check to see if the device is still transmitting and increment the counter
62
56
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _async_update_rssi_and_transients(self) -> None: for ( unique_id, ibeacon_advertisement, ) in self._last_ibeacon_advertisement_by_unique_id.items(): address = unique_id.split("_")[-1] service_info = bluetooth.async_last_service_info( self.hass, address, connectable=False ) if not service_info: continue if address in self._transient_seen_count: self._transient_seen_count[address] += 1 if self._transient_seen_count[address] == MIN_SEEN_TRANSIENT_NEW: self._transient_seen_count.pop(address) _async_dispatch_update( self.hass, unique_id, service_info, ibeacon_advertisement, True, True, ) continue if service_info.rssi != ibeacon_advertisement.rssi: ibeacon_advertisement.update_rssi(service_info.rssi) async_dispatcher_send( self.hass, signal_seen(unique_id), ibeacon_advertisement, ) ``` ###Assistant : Check to see if the rssi has changed and update any devices. We don't get callbacks on RSSI changes so we need to check them here and send them over the dispatcher periodically to ensure the distance calculation is updated. If the transient flag is set we also need to check to see if the device is still transmitting and increment the counter
847
def setup_app_for_worker(app, loglevel, logfile) -> None: # type: (Celery, Union[str, int], str) -> None app.finalize() app.set_current() app.set_default() type(app.log)._setup = False app.log.setup(loglevel=loglevel, logfile=logfile)
Setup the app to be used for starting an embedded worker.
11
22
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def setup_app_for_worker(app, loglevel, logfile) -> None: # type: (Celery, Union[str, int], str) -> None app.finalize() app.set_current() app.set_default() type(app.log)._setup = False app.log.setup(loglevel=loglevel, logfile=logfile) ``` ###Assistant : Setup the app to be used for starting an embedded worker.
848
def rank(self): if self._rank is None: self._rank = int(gray_to_bin(self.current), 2) return self._rank
Ranks the Gray code. A ranking algorithm determines the position (or rank) of a combinatorial object among all the objects w.r.t. a given order. For example, the 4 bit binary reflected Gray code (BRGC) '0101' has a rank of 6 as it appears in the 6th position in the canonical ordering of the family of 4 bit Gray codes. Examples ======== >>> from sympy.combinatorics import GrayCode >>> a = GrayCode(3) >>> list(a.generate_gray()) ['000', '001', '011', '010', '110', '111', '101', '100'] >>> GrayCode(3, start='100').rank 7 >>> GrayCode(3, rank=7).current '100' See Also ======== unrank References ========== .. [1] http://statweb.stanford.edu/~susan/courses/s208/node12.html
97
12
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def rank(self): if self._rank is None: self._rank = int(gray_to_bin(self.current), 2) return self._rank ``` ###Assistant : Ranks the Gray code. A ranking algorithm determines the position (or rank) of a combinatorial object among all the objects w.r.t. a given order. For example, the 4 bit binary reflected Gray code (BRGC) '0101' has a rank of 6 as it appears in the 6th position in the canonical ordering of the family of 4 bit Gray codes. Examples ======== >>> from sympy.combinatorics import GrayCode >>> a = GrayCode(3) >>> list(a.generate_gray()) ['000', '001', '011', '010', '110', '111', '101', '100'] >>> GrayCode(3, start='100').rank 7 >>> GrayCode(3, rank=7).current '100' See Also ======== unrank References ========== .. [1] http://statweb.stanford.edu/~susan/courses/s208/node12.html
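The rank computation in the record above is just "convert the reflected Gray string to binary, then read it as an integer". A dependency-free sketch of `gray_to_bin` and the resulting rank, matching the docstring's 3-bit examples:

```Python
def gray_to_bin(gray):
    # First bit is copied; each later binary bit is the previous binary bit XOR the Gray bit.
    bits = [gray[0]]
    for g in gray[1:]:
        bits.append(str(int(bits[-1]) ^ int(g)))
    return "".join(bits)

print(gray_to_bin("100"), int(gray_to_bin("100"), 2))  # 111, 7 -> rank of '100' in the 3-bit BRGC
print(gray_to_bin("101"), int(gray_to_bin("101"), 2))  # 110, 6
```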
849
async def async_update(self, log_errors=True): if not self._async_client: self._async_client = get_async_client( self._hass, verify_ssl=self._verify_ssl ) rendered_headers = template.render_complex(self._headers, parse_result=False) rendered_params = template.render_complex(self._params) _LOGGER.debug("Updating from %s", self._resource) try: response = await self._async_client.request( self._method, self._resource, headers=rendered_headers, params=rendered_params, auth=self._auth, content=self._request_data, timeout=self._timeout, follow_redirects=True, ) self.data = response.text self.headers = response.headers except httpx.TimeoutException as ex: if log_errors: _LOGGER.error("Timeout while fetching data: %s", self._resource) self.last_exception = ex self.data = None self.headers = None except httpx.RequestError as ex: if log_errors: _LOGGER.error( "Error fetching data: %s failed with %s", self._resource, ex ) self.last_exception = ex self.data = None self.headers = None
Get the latest data from REST service with provided method.
10
91
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def async_update(self, log_errors=True): if not self._async_client: self._async_client = get_async_client( self._hass, verify_ssl=self._verify_ssl ) rendered_headers = template.render_complex(self._headers, parse_result=False) rendered_params = template.render_complex(self._params) _LOGGER.debug("Updating from %s", self._resource) try: response = await self._async_client.request( self._method, self._resource, headers=rendered_headers, params=rendered_params, auth=self._auth, content=self._request_data, timeout=self._timeout, follow_redirects=True, ) self.data = response.text self.headers = response.headers except httpx.TimeoutException as ex: if log_errors: _LOGGER.error("Timeout while fetching data: %s", self._resource) self.last_exception = ex self.data = None self.headers = None except httpx.RequestError as ex: if log_errors: _LOGGER.error( "Error fetching data: %s failed with %s", self._resource, ex ) self.last_exception = ex self.data = None self.headers = None ``` ###Assistant : Get the latest data from REST service with provided method.
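A stripped-down, standalone version of the request/response handling in the record above, assuming `httpx` is installed; the URL and parameters are placeholders:

```Python
import asyncio
import httpx

async def fetch(url):
    async with httpx.AsyncClient(verify=True, timeout=10.0) as client:
        try:
            resp = await client.request("GET", url, params={"q": "demo"}, follow_redirects=True)
            return resp.text, dict(resp.headers)
        except httpx.TimeoutException:
            return None, None
        except httpx.RequestError:
            return None, None

# asyncio.run(fetch("https://example.com"))
```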
850
def set_default_timezone() -> None: dotenv.load_dotenv(USER_ENV_FILE) user_tz = os.getenv("OPENBB_TIMEZONE") if not user_tz: dotenv.set_key(USER_ENV_FILE, "OPENBB_TIMEZONE", "America/New_York")
Set a default (America/New_York) timezone if one doesn't exist.
9
14
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def set_default_timezone() -> None: dotenv.load_dotenv(USER_ENV_FILE) user_tz = os.getenv("OPENBB_TIMEZONE") if not user_tz: dotenv.set_key(USER_ENV_FILE, "OPENBB_TIMEZONE", "America/New_York") ``` ###Assistant : Set a default (America/New_York) timezone if one doesn't exist.
851
def return_stopped_response(self) -> dict: return {k: "Agent stopped due to max iterations." for k in self.return_values}
Return response when agent has been stopped due to max iterations.
11
16
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def return_stopped_response(self) -> dict: return {k: "Agent stopped due to max iterations." for k in self.return_values} ``` ###Assistant : Return response when agent has been stopped due to max iterations.
852
def clean(self, value): if self.null_option is not None and value == settings.FILTERS_NULL_CHOICE_VALUE: return None return super().clean(value)
When null option is enabled and "None" is sent as part of a form to be submitted, it is sent as the string 'null'. This will check for that condition and gracefully handle the conversion to a NoneType.
38
16
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def clean(self, value): if self.null_option is not None and value == settings.FILTERS_NULL_CHOICE_VALUE: return None return super().clean(value) ``` ###Assistant : When null option is enabled and "None" is sent as part of a form to be submitted, it is sent as the string 'null'. This will check for that condition and gracefully handle the conversion to a NoneType.
853
def test_unexpected_auth_events(self): creator = "@creator:example.com" create_event = _create_event(RoomVersions.V9, creator) join_event = _join_event(RoomVersions.V9, creator) pl_event = _power_levels_event( RoomVersions.V9, creator, {"state_default": 30, "users": {"creator": 100}}, ) join_rules_event = _join_rules_event(RoomVersions.V9, creator, "public") event_store = _StubEventSourceStore() event_store.add_events([create_event, join_event, pl_event, join_rules_event]) good_event = _random_state_event( RoomVersions.V9, creator, [create_event, join_event, pl_event] ) # join rules should *not* be included in the auth events. bad_event = _random_state_event( RoomVersions.V9, creator, [create_event, join_event, pl_event, join_rules_event], ) get_awaitable_result( event_auth.check_state_independent_auth_rules(event_store, good_event) ) with self.assertRaises(AuthError): get_awaitable_result( event_auth.check_state_independent_auth_rules(event_store, bad_event) )
Events with excess auth_events should be rejected https://spec.matrix.org/v1.3/rooms/v9/#authorization-rules 2. Reject if event has auth_events that: 2. have entries whose type and state_key don’t match those specified by the auth events selection algorithm described in the server specification.
37
76
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_unexpected_auth_events(self): creator = "@creator:example.com" create_event = _create_event(RoomVersions.V9, creator) join_event = _join_event(RoomVersions.V9, creator) pl_event = _power_levels_event( RoomVersions.V9, creator, {"state_default": 30, "users": {"creator": 100}}, ) join_rules_event = _join_rules_event(RoomVersions.V9, creator, "public") event_store = _StubEventSourceStore() event_store.add_events([create_event, join_event, pl_event, join_rules_event]) good_event = _random_state_event( RoomVersions.V9, creator, [create_event, join_event, pl_event] ) # join rules should *not* be included in the auth events. bad_event = _random_state_event( RoomVersions.V9, creator, [create_event, join_event, pl_event, join_rules_event], ) get_awaitable_result( event_auth.check_state_independent_auth_rules(event_store, good_event) ) with self.assertRaises(AuthError): get_awaitable_result( event_auth.check_state_independent_auth_rules(event_store, bad_event) ) ``` ###Assistant : Events with excess auth_events should be rejected https://spec.matrix.org/v1.3/rooms/v9/#authorization-rules 2. Reject if event has auth_events that: 2. have entries whose type and state_key don’t match those specified by the auth events selection algorithm described in the server specification.
854
async def _consume_incoming(self) -> None: while True: message_json = await self.incoming_queue.get() if message_json is None: self.incoming_queue.task_done() break type = message_json["type"] if type == "client_log": path = message_json["payload"]["path"] line_number = message_json["payload"]["line_number"] timestamp = message_json["payload"]["timestamp"] encoded_segments = message_json["payload"]["encoded_segments"] decoded_segments = base64.b64decode(encoded_segments) segments = pickle.loads(decoded_segments) self.service.console.print( DevtoolsLogMessage( segments=segments, path=path, line_number=line_number, unix_timestamp=timestamp, ) ) elif type == "client_spillover": spillover = int(message_json["payload"]["spillover"]) info_renderable = DevtoolsInternalMessage( f"Discarded {spillover} messages", level="warning" ) self.service.console.print(info_renderable) self.incoming_queue.task_done()
Consume messages from the incoming (client -> server) Queue, and print the corresponding renderables to the console for each message.
20
67
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def _consume_incoming(self) -> None: while True: message_json = await self.incoming_queue.get() if message_json is None: self.incoming_queue.task_done() break type = message_json["type"] if type == "client_log": path = message_json["payload"]["path"] line_number = message_json["payload"]["line_number"] timestamp = message_json["payload"]["timestamp"] encoded_segments = message_json["payload"]["encoded_segments"] decoded_segments = base64.b64decode(encoded_segments) segments = pickle.loads(decoded_segments) self.service.console.print( DevtoolsLogMessage( segments=segments, path=path, line_number=line_number, unix_timestamp=timestamp, ) ) elif type == "client_spillover": spillover = int(message_json["payload"]["spillover"]) info_renderable = DevtoolsInternalMessage( f"Discarded {spillover} messages", level="warning" ) self.service.console.print(info_renderable) self.incoming_queue.task_done() ``` ###Assistant : Consume messages from the incoming (client -> server) Queue, and print the corresponding renderables to the console for each message.
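The wire format decoded in the record above (base64-wrapped pickle inside a JSON message) is easy to exercise on its own. A tiny roundtrip sketch with a made-up payload:

```Python
import base64
import pickle

payload = {"path": "app.py", "line_number": 42, "message": "hello"}

# Sender side: pickle, then base64 so the bytes survive a JSON message.
encoded_segments = base64.b64encode(pickle.dumps(payload)).decode("ascii")

# Receiver side, as in _consume_incoming: base64-decode, then unpickle.
decoded = pickle.loads(base64.b64decode(encoded_segments))
print(decoded == payload)  # True
```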
855
def _get_basic_ray_cr() -> dict: cr_path = str( Path(__file__).resolve().parents[2] / "python" / "ray" / "autoscaler" / "kuberay" / "ray-cluster.complete.yaml" ) return yaml.safe_load(open(cr_path).read())
Returns the example Ray CR included in the Ray documentation.
10
21
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_basic_ray_cr() -> dict: cr_path = str( Path(__file__).resolve().parents[2] / "python" / "ray" / "autoscaler" / "kuberay" / "ray-cluster.complete.yaml" ) return yaml.safe_load(open(cr_path).read()) ``` ###Assistant : Returns the example Ray CR included in the Ray documentation.
856
def spectral_graph_forge(G, alpha, transformation="identity", seed=None): import numpy as np import scipy as sp import scipy.stats # call as sp.stats available_transformations = ["identity", "modularity"] alpha = np.clip(alpha, 0, 1) A = nx.to_numpy_array(G) n = A.shape[1] level = int(round(n * alpha)) if transformation not in available_transformations: msg = f"{transformation!r} is not a valid transformation. " msg += f"Transformations: {available_transformations}" raise nx.NetworkXError(msg) K = np.ones((1, n)) @ A B = A if transformation == "modularity": B -= K.T @ K / K.sum() # Compute low-rank approximation of B evals, evecs = np.linalg.eigh(B) k = np.argsort(np.abs(evals))[::-1] # indices of evals in descending order evecs[:, k[np.arange(level, n)]] = 0 # set smallest eigenvectors to 0 B = evecs @ np.diag(evals) @ evecs.T if transformation == "modularity": B += K.T @ K / K.sum() B = np.clip(B, 0, 1) np.fill_diagonal(B, 0) for i in range(n - 1): B[i, i + 1 :] = sp.stats.bernoulli.rvs(B[i, i + 1 :], random_state=seed) B[i + 1 :, i] = np.transpose(B[i, i + 1 :]) H = nx.from_numpy_array(B) return H
Returns a random simple graph with spectrum resembling that of `G` This algorithm, called Spectral Graph Forge (SGF), computes the eigenvectors of a given graph adjacency matrix, filters them and builds a random graph with a similar eigenstructure. SGF has been proved to be particularly useful for synthesizing realistic social networks and it can also be used to anonymize graph sensitive data. Parameters ---------- G : Graph alpha : float Ratio representing the percentage of eigenvectors of G to consider, values in [0,1]. transformation : string, optional Represents the intended matrix linear transformation, possible values are 'identity' and 'modularity' seed : integer, random_state, or None (default) Indicator of numpy random number generation state. See :ref:`Randomness<randomness>`. Returns ------- H : Graph A graph with a similar eigenvector structure of the input one. Raises ------ NetworkXError If transformation has a value different from 'identity' or 'modularity' Notes ----- Spectral Graph Forge (SGF) generates a random simple graph resembling the global properties of the given one. It leverages the low-rank approximation of the associated adjacency matrix driven by the *alpha* precision parameter. SGF preserves the number of nodes of the input graph and their ordering. This way, nodes of output graphs resemble the properties of the input one and attributes can be directly mapped. It considers the graph adjacency matrices which can optionally be transformed to other symmetric real matrices (currently transformation options include *identity* and *modularity*). The *modularity* transformation, in the sense of Newman's modularity matrix allows the focusing on community structure related properties of the graph. SGF applies a low-rank approximation whose fixed rank is computed from the ratio *alpha* of the input graph adjacency matrix dimension. This step performs a filtering on the input eigenvectors similar to the low pass filtering common in telecommunications. The filtered values (after truncation) are used as input to a Bernoulli sampling for constructing a random adjacency matrix. References ---------- .. [1] L. Baldesi, C. T. Butts, A. Markopoulou, "Spectral Graph Forge: Graph Generation Targeting Modularity", IEEE Infocom, '18. https://arxiv.org/abs/1801.01715 .. [2] M. Newman, "Networks: an introduction", Oxford university press, 2010 Examples -------- >>> G = nx.karate_club_graph() >>> H = nx.spectral_graph_forge(G, 0.3) >>>
358
169
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def spectral_graph_forge(G, alpha, transformation="identity", seed=None): import numpy as np import scipy as sp import scipy.stats # call as sp.stats available_transformations = ["identity", "modularity"] alpha = np.clip(alpha, 0, 1) A = nx.to_numpy_array(G) n = A.shape[1] level = int(round(n * alpha)) if transformation not in available_transformations: msg = f"{transformation!r} is not a valid transformation. " msg += f"Transformations: {available_transformations}" raise nx.NetworkXError(msg) K = np.ones((1, n)) @ A B = A if transformation == "modularity": B -= K.T @ K / K.sum() # Compute low-rank approximation of B evals, evecs = np.linalg.eigh(B) k = np.argsort(np.abs(evals))[::-1] # indices of evals in descending order evecs[:, k[np.arange(level, n)]] = 0 # set smallest eigenvectors to 0 B = evecs @ np.diag(evals) @ evecs.T if transformation == "modularity": B += K.T @ K / K.sum() B = np.clip(B, 0, 1) np.fill_diagonal(B, 0) for i in range(n - 1): B[i, i + 1 :] = sp.stats.bernoulli.rvs(B[i, i + 1 :], random_state=seed) B[i + 1 :, i] = np.transpose(B[i, i + 1 :]) H = nx.from_numpy_array(B) return H ``` ###Assistant : Returns a random simple graph with spectrum resembling that of `G` This algorithm, called Spectral Graph Forge (SGF), computes the eigenvectors of a given graph adjacency matrix, filters them and builds a random graph with a similar eigenstructure. SGF has been proved to be particularly useful for synthesizing realistic social networks and it can also be used to anonymize graph sensitive data. Parameters ---------- G : Graph alpha : float Ratio representing the percentage of eigenvectors of G to consider, values in [0,1]. transformation : string, optional Represents the intended matrix linear transformation, possible values are 'identity' and 'modularity' seed : integer, random_state, or None (default) Indicator of numpy random number generation state. See :ref:`Randomness<randomness>`. Returns ------- H : Graph A graph with a similar eigenvector structure of the input one. Raises ------ NetworkXError If transformation has a value different from 'identity' or 'modularity' Notes ----- Spectral Graph Forge (SGF) generates a random simple graph resembling the global properties of the given one. It leverages the low-rank approximation of the associated adjacency matrix driven by the *alpha* precision parameter. SGF preserves the number of nodes of the input graph and their ordering. This way, nodes of output graphs resemble the properties of the input one and attributes can be directly mapped. It considers the graph adjacency matrices which can optionally be transformed to other symmetric real matrices (currently transformation options include *identity* and *modularity*). The *modularity* transformation, in the sense of Newman's modularity matrix allows the focusing on community structure related properties of the graph. SGF applies a low-rank approximation whose fixed rank is computed from the ratio *alpha* of the input graph adjacency matrix dimension. This step performs a filtering on the input eigenvectors similar to the low pass filtering common in telecommunications. The filtered values (after truncation) are used as input to a Bernoulli sampling for constructing a random adjacency matrix. References ---------- .. [1] L. Baldesi, C. T. Butts, A. Markopoulou, "Spectral Graph Forge: Graph Generation Targeting Modularity", IEEE Infocom, '18. 
https://arxiv.org/abs/1801.01715 .. [2] M. Newman, "Networks: an introduction", Oxford university press, 2010 Examples -------- >>> G = nx.karate_club_graph() >>> H = nx.spectral_graph_forge(G, 0.3) >>>
857
def encode_example(self, example): example = cast_to_python_objects(example) return encode_nested_example(self, example)
Encode example into a format for Arrow. Args: example (`dict[str, Any]`): Data in a Dataset row. Returns: `dict[str, Any]`
19
9
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def encode_example(self, example): example = cast_to_python_objects(example) return encode_nested_example(self, example) ``` ###Assistant : Encode example into a format for Arrow. Args: example (`dict[str, Any]`): Data in a Dataset row. Returns: `dict[str, Any]`
858
def _add_unique_metric_name(self, metric_name, metric_fn, output_index): # For multi-output models, prepend the output names to the metric name. if len(self.output_names) > 1: # If we're loading from an already-serialized model, we've already # prepended the output name, and we don't want to do it again. # # Alternatively, we may be receiving a stateless metric (e.g. the string # "accuracy") rather than a `Metric` object, in which case we want to # prepend the output name even if we are loading a serialized model. if not getattr(metric_fn, "_from_serialized", False): metric_name = "%s_%s" % ( self.output_names[output_index], metric_name, ) j = 1 base_metric_name = metric_name while metric_name in self.metrics_names: metric_name = "%s_%d" % (base_metric_name, j) j += 1 return metric_name
Makes the metric name unique. If there are multiple outputs for which the metrics are calculated, the metric names have to be made unique by appending an integer. Args: metric_name: Metric name that corresponds to the metric specified by the user. For example: 'acc'. metric_fn: The Metric object. output_index: The index of the model output for which the metric name is being added. Returns: string, name of the model's unique metric name
72
117
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _add_unique_metric_name(self, metric_name, metric_fn, output_index): # For multi-output models, prepend the output names to the metric name. if len(self.output_names) > 1: # If we're loading from an already-serialized model, we've already # prepended the output name, and we don't want to do it again. # # Alternatively, we may be receiving a stateless metric (e.g. the string # "accuracy") rather than a `Metric` object, in which case we want to # prepend the output name even if we are loading a serialized model. if not getattr(metric_fn, "_from_serialized", False): metric_name = "%s_%s" % ( self.output_names[output_index], metric_name, ) j = 1 base_metric_name = metric_name while metric_name in self.metrics_names: metric_name = "%s_%d" % (base_metric_name, j) j += 1 return metric_name ``` ###Assistant : Makes the metric name unique. If there are multiple outputs for which the metrics are calculated, the metric names have to be made unique by appending an integer. Args: metric_name: Metric name that corresponds to the metric specified by the user. For example: 'acc'. metric_fn: The Metric object. output_index: The index of the model output for which the metric name is being added. Returns: string, name of the model's unique metric name
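The "append an integer until unique" rule described above can be illustrated on its own. The sketch below is self-contained; the metric names are made up and not taken from the source.

```Python
# Minimal sketch of the "append an integer until unique" rule described above.
existing_names = ["acc", "acc_1", "loss"]  # hypothetical metric names already in use

def unique_metric_name(name, taken):
    j, base = 1, name
    while name in taken:
        name = "%s_%d" % (base, j)
        j += 1
    return name

print(unique_metric_name("acc", existing_names))  # acc_2
print(unique_metric_name("mse", existing_names))  # mse (already unique, unchanged)
```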
859
def strongly_connected_components(G): preorder = {} lowlink = {} scc_found = set() scc_queue = [] i = 0 # Preorder counter neighbors = {v: iter(G[v]) for v in G} for source in G: if source not in scc_found: queue = [source] while queue: v = queue[-1] if v not in preorder: i = i + 1 preorder[v] = i done = True for w in neighbors[v]: if w not in preorder: queue.append(w) done = False break if done: lowlink[v] = preorder[v] for w in G[v]: if w not in scc_found: if preorder[w] > preorder[v]: lowlink[v] = min([lowlink[v], lowlink[w]]) else: lowlink[v] = min([lowlink[v], preorder[w]]) queue.pop() if lowlink[v] == preorder[v]: scc = {v} while scc_queue and preorder[scc_queue[-1]] > preorder[v]: k = scc_queue.pop() scc.add(k) scc_found.update(scc) yield scc else: scc_queue.append(v) @not_implemented_for("undirected")
Generate nodes in strongly connected components of graph. Parameters ---------- G : NetworkX Graph A directed graph. Returns ------- comp : generator of sets A generator of sets of nodes, one for each strongly connected component of G. Raises ------ NetworkXNotImplemented If G is undirected. Examples -------- Generate a sorted list of strongly connected components, largest first. >>> G = nx.cycle_graph(4, create_using=nx.DiGraph()) >>> nx.add_cycle(G, [10, 11, 12]) >>> [ ... len(c) ... for c in sorted(nx.strongly_connected_components(G), key=len, reverse=True) ... ] [4, 3] If you only want the largest component, it's more efficient to use max instead of sort. >>> largest = max(nx.strongly_connected_components(G), key=len) See Also -------- connected_components weakly_connected_components kosaraju_strongly_connected_components Notes ----- Uses Tarjan's algorithm[1]_ with Nuutila's modifications[2]_. Nonrecursive version of algorithm. References ---------- .. [1] Depth-first search and linear graph algorithms, R. Tarjan SIAM Journal of Computing 1(2):146-160, (1972). .. [2] On finding the strongly connected components in a directed graph. E. Nuutila and E. Soisalon-Soinen Information Processing Letters 49(1): 9-14, (1994)..
162
126
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def strongly_connected_components(G): preorder = {} lowlink = {} scc_found = set() scc_queue = [] i = 0 # Preorder counter neighbors = {v: iter(G[v]) for v in G} for source in G: if source not in scc_found: queue = [source] while queue: v = queue[-1] if v not in preorder: i = i + 1 preorder[v] = i done = True for w in neighbors[v]: if w not in preorder: queue.append(w) done = False break if done: lowlink[v] = preorder[v] for w in G[v]: if w not in scc_found: if preorder[w] > preorder[v]: lowlink[v] = min([lowlink[v], lowlink[w]]) else: lowlink[v] = min([lowlink[v], preorder[w]]) queue.pop() if lowlink[v] == preorder[v]: scc = {v} while scc_queue and preorder[scc_queue[-1]] > preorder[v]: k = scc_queue.pop() scc.add(k) scc_found.update(scc) yield scc else: scc_queue.append(v) @not_implemented_for("undirected") ``` ###Assistant : Generate nodes in strongly connected components of graph. Parameters ---------- G : NetworkX Graph A directed graph. Returns ------- comp : generator of sets A generator of sets of nodes, one for each strongly connected component of G. Raises ------ NetworkXNotImplemented If G is undirected. Examples -------- Generate a sorted list of strongly connected components, largest first. >>> G = nx.cycle_graph(4, create_using=nx.DiGraph()) >>> nx.add_cycle(G, [10, 11, 12]) >>> [ ... len(c) ... for c in sorted(nx.strongly_connected_components(G), key=len, reverse=True) ... ] [4, 3] If you only want the largest component, it's more efficient to use max instead of sort. >>> largest = max(nx.strongly_connected_components(G), key=len) See Also -------- connected_components weakly_connected_components kosaraju_strongly_connected_components Notes ----- Uses Tarjan's algorithm[1]_ with Nuutila's modifications[2]_. Nonrecursive version of algorithm. References ---------- .. [1] Depth-first search and linear graph algorithms, R. Tarjan SIAM Journal of Computing 1(2):146-160, (1972). .. [2] On finding the strongly connected components in a directed graph. E. Nuutila and E. Soisalon-Soinen Information Processing Letters 49(1): 9-14, (1994)..
860
def score(self, X, y, sample_weight=None): # TODO: Adapt link to User Guide in the docstring, once # https://github.com/scikit-learn/scikit-learn/pull/22118 is merged. # # Note, default score defined in RegressorMixin is R^2 score. # TODO: make D^2 a score function in module metrics (and thereby get # input validation and so on) raw_prediction = self._linear_predictor(X) # validates X # required by losses y = check_array(y, dtype=raw_prediction.dtype, order="C", ensure_2d=False) if sample_weight is not None: # Note that _check_sample_weight calls check_array(order="C") required by # losses. sample_weight = _check_sample_weight(sample_weight, X, dtype=y.dtype) base_loss = self._linear_loss.base_loss if not base_loss.in_y_true_range(y): raise ValueError( "Some value(s) of y are out of the valid range of the loss" f" {self._base_loss.__name__}." ) # Note that constant_to_optimal_zero is already multiplied by sample_weight. constant = np.mean(base_loss.constant_to_optimal_zero(y_true=y)) if sample_weight is not None: constant *= sample_weight.shape[0] / np.sum(sample_weight) # Missing factor of 2 in deviance cancels out. deviance = base_loss( y_true=y, raw_prediction=raw_prediction, sample_weight=sample_weight, n_threads=1, ) y_mean = base_loss.link.link(np.average(y, weights=sample_weight)) deviance_null = base_loss( y_true=y, raw_prediction=np.tile(y_mean, y.shape[0]), sample_weight=sample_weight, n_threads=1, ) return 1 - (deviance + constant) / (deviance_null + constant)
Compute D^2, the percentage of deviance explained. D^2 is a generalization of the coefficient of determination R^2. R^2 uses squared error and D^2 uses the deviance of this GLM, see the :ref:`User Guide <regression_metrics>`. D^2 is defined as :math:`D^2 = 1-\\frac{D(y_{true},y_{pred})}{D_{null}}`, :math:`D_{null}` is the null deviance, i.e. the deviance of a model with intercept alone, which corresponds to :math:`y_{pred} = \\bar{y}`. The mean :math:`\\bar{y}` is averaged by sample_weight. Best possible score is 1.0 and it can be negative (because the model can be arbitrarily worse). Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Test samples. y : array-like of shape (n_samples,) True values of target. sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- score : float D^2 of self.predict(X) w.r.t. y.
127
172
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def score(self, X, y, sample_weight=None): # TODO: Adapt link to User Guide in the docstring, once # https://github.com/scikit-learn/scikit-learn/pull/22118 is merged. # # Note, default score defined in RegressorMixin is R^2 score. # TODO: make D^2 a score function in module metrics (and thereby get # input validation and so on) raw_prediction = self._linear_predictor(X) # validates X # required by losses y = check_array(y, dtype=raw_prediction.dtype, order="C", ensure_2d=False) if sample_weight is not None: # Note that _check_sample_weight calls check_array(order="C") required by # losses. sample_weight = _check_sample_weight(sample_weight, X, dtype=y.dtype) base_loss = self._linear_loss.base_loss if not base_loss.in_y_true_range(y): raise ValueError( "Some value(s) of y are out of the valid range of the loss" f" {self._base_loss.__name__}." ) # Note that constant_to_optimal_zero is already multiplied by sample_weight. constant = np.mean(base_loss.constant_to_optimal_zero(y_true=y)) if sample_weight is not None: constant *= sample_weight.shape[0] / np.sum(sample_weight) # Missing factor of 2 in deviance cancels out. deviance = base_loss( y_true=y, raw_prediction=raw_prediction, sample_weight=sample_weight, n_threads=1, ) y_mean = base_loss.link.link(np.average(y, weights=sample_weight)) deviance_null = base_loss( y_true=y, raw_prediction=np.tile(y_mean, y.shape[0]), sample_weight=sample_weight, n_threads=1, ) return 1 - (deviance + constant) / (deviance_null + constant) ``` ###Assistant : Compute D^2, the percentage of deviance explained. D^2 is a generalization of the coefficient of determination R^2. R^2 uses squared error and D^2 uses the deviance of this GLM, see the :ref:`User Guide <regression_metrics>`. D^2 is defined as :math:`D^2 = 1-\\frac{D(y_{true},y_{pred})}{D_{null}}`, :math:`D_{null}` is the null deviance, i.e. the deviance of a model with intercept alone, which corresponds to :math:`y_{pred} = \\bar{y}`. The mean :math:`\\bar{y}` is averaged by sample_weight. Best possible score is 1.0 and it can be negative (because the model can be arbitrarily worse). Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Test samples. y : array-like of shape (n_samples,) True values of target. sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- score : float D^2 of self.predict(X) w.r.t. y.
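A hedged usage sketch: assuming this method backs scikit-learn's GLM regressors (for example PoissonRegressor), calling `score` returns D^2 rather than R^2. The data below is synthetic and the coefficients are made up for illustration.

```Python
import numpy as np
from sklearn.linear_model import PoissonRegressor

# Synthetic Poisson-distributed targets; coefficients chosen arbitrarily.
rng = np.random.RandomState(0)
X = rng.uniform(size=(200, 3))
y = rng.poisson(lam=np.exp(X @ np.array([0.5, -0.2, 0.1])))

model = PoissonRegressor().fit(X, y)
# score() here is D^2 (fraction of deviance explained), not R^2.
print(model.score(X, y))  # best possible value is 1.0; values can be negative
```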
861
def upsample_conv_2d(x, w, k=None, factor=2, gain=1, data_format='NCHW', impl='cuda'): r assert isinstance(factor, int) and factor >= 1 # Check weight shape. w = tf.convert_to_tensor(w) assert w.shape.rank == 4 convH = w.shape[0].value convW = w.shape[1].value inC = _shape(w, 2) outC = _shape(w, 3) assert convW == convH # Setup filter kernel. if k is None: k = [1] * factor k = _setup_kernel(k) * (gain * (factor ** 2)) p = (k.shape[0] - factor) - (convW - 1) # Determine data dimensions. if data_format == 'NCHW': stride = [1, 1, factor, factor] output_shape = [_shape(x, 0), outC, (_shape(x, 2) - 1) * factor + convH, (_shape(x, 3) - 1) * factor + convW] num_groups = _shape(x, 1) // inC else: stride = [1, factor, factor, 1] output_shape = [_shape(x, 0), (_shape(x, 1) - 1) * factor + convH, (_shape(x, 2) - 1) * factor + convW, outC] num_groups = _shape(x, 3) // inC # Transpose weights. w = tf.reshape(w, [convH, convW, inC, num_groups, -1]) w = tf.transpose(w[::-1, ::-1], [0, 1, 4, 3, 2]) w = tf.reshape(w, [convH, convW, -1, num_groups * inC]) # Execute. x = tf.nn.conv2d_transpose(x, w, output_shape=output_shape, strides=stride, padding='VALID', data_format=data_format) return _simple_upfirdn_2d(x, k, pad0=(p+1)//2+factor-1, pad1=p//2+1, data_format=data_format, impl=impl) #----------------------------------------------------------------------------
Fused `upsample_2d()` followed by `tf.nn.conv2d()`. Padding is performed only once at the beginning, not between the operations. The fused op is considerably more efficient than performing the same calculation using standard TensorFlow ops. It supports gradients of arbitrary order. Args: x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`. w: Weight tensor of the shape `[filterH, filterW, inChannels, outChannels]`. Grouped convolution can be performed by `inChannels = x.shape[0] // numGroups`. k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable). The default is `[1] * factor`, which corresponds to nearest-neighbor upsampling. factor: Integer upsampling factor (default: 2). gain: Scaling factor for signal magnitude (default: 1.0). data_format: `'NCHW'` or `'NHWC'` (default: `'NCHW'`). impl: Name of the implementation to use. Can be `"ref"` or `"cuda"` (default). Returns: Tensor of the shape `[N, C, H * factor, W * factor]` or `[N, H * factor, W * factor, C]`, and same datatype as `x`.
158
198
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def upsample_conv_2d(x, w, k=None, factor=2, gain=1, data_format='NCHW', impl='cuda'): r assert isinstance(factor, int) and factor >= 1 # Check weight shape. w = tf.convert_to_tensor(w) assert w.shape.rank == 4 convH = w.shape[0].value convW = w.shape[1].value inC = _shape(w, 2) outC = _shape(w, 3) assert convW == convH # Setup filter kernel. if k is None: k = [1] * factor k = _setup_kernel(k) * (gain * (factor ** 2)) p = (k.shape[0] - factor) - (convW - 1) # Determine data dimensions. if data_format == 'NCHW': stride = [1, 1, factor, factor] output_shape = [_shape(x, 0), outC, (_shape(x, 2) - 1) * factor + convH, (_shape(x, 3) - 1) * factor + convW] num_groups = _shape(x, 1) // inC else: stride = [1, factor, factor, 1] output_shape = [_shape(x, 0), (_shape(x, 1) - 1) * factor + convH, (_shape(x, 2) - 1) * factor + convW, outC] num_groups = _shape(x, 3) // inC # Transpose weights. w = tf.reshape(w, [convH, convW, inC, num_groups, -1]) w = tf.transpose(w[::-1, ::-1], [0, 1, 4, 3, 2]) w = tf.reshape(w, [convH, convW, -1, num_groups * inC]) # Execute. x = tf.nn.conv2d_transpose(x, w, output_shape=output_shape, strides=stride, padding='VALID', data_format=data_format) return _simple_upfirdn_2d(x, k, pad0=(p+1)//2+factor-1, pad1=p//2+1, data_format=data_format, impl=impl) #---------------------------------------------------------------------------- ``` ###Assistant : Fused `upsample_2d()` followed by `tf.nn.conv2d()`. Padding is performed only once at the beginning, not between the operations. The fused op is considerably more efficient than performing the same calculation using standard TensorFlow ops. It supports gradients of arbitrary order. Args: x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`. w: Weight tensor of the shape `[filterH, filterW, inChannels, outChannels]`. Grouped convolution can be performed by `inChannels = x.shape[0] // numGroups`. k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable). The default is `[1] * factor`, which corresponds to nearest-neighbor upsampling. factor: Integer upsampling factor (default: 2). gain: Scaling factor for signal magnitude (default: 1.0). data_format: `'NCHW'` or `'NHWC'` (default: `'NCHW'`). impl: Name of the implementation to use. Can be `"ref"` or `"cuda"` (default). Returns: Tensor of the shape `[N, C, H * factor, W * factor]` or `[N, H * factor, W * factor, C]`, and same datatype as `x`.
862
def normalize_config(config): return json.loads(json.dumps(config, cls=NumpyEncoder))
Convert to json string and back again to remove numpy types.
11
5
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def normalize_config(config): return json.loads(json.dumps(config, cls=NumpyEncoder)) ``` ###Assistant : Convert to json string and back again to remove numpy types.
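The json round trip only strips numpy types because the encoder knows how to serialize them; `NumpyEncoder` is not shown above, so the sketch below uses a hypothetical stand-in encoder to show the idea.

```Python
import json
import numpy as np

class _NumpyEncoder(json.JSONEncoder):
    # Hypothetical stand-in for the NumpyEncoder referenced above:
    # convert numpy scalars and arrays into plain Python types.
    def default(self, obj):
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        return super().default(obj)

config = {"lr": np.float32(0.01), "layers": np.array([64, 32])}
normalized = json.loads(json.dumps(config, cls=_NumpyEncoder))
print(normalized)  # all values are now plain Python ints, floats, and lists
```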
863
def ensure_srgb(img, srgb_profile=None): img_info = dict(img.info) icc = img_info.pop("icc_profile", None) if not icc: return img if ImageCms is None: raise RuntimeError("ImageCms is required for color profile utilities") if srgb_profile is not None: srgb_profile = ImageCms.ImageCmsProfile(srgb_profile) else: srgb_profile = DEFAULT_SRGB_PROFILE buf = BytesIO(icc) try: orig_profile = ImageCms.ImageCmsProfile(buf) color_space = orig_profile.profile.xcolor_space except (AttributeError, OSError, TypeError, ValueError): return None finally: buf.close() if color_space == "RGB ": logger.debug("Returning img (RGB)") return img if color_space not in ("GRAY", "CMYK"): # Other color spaces are rare, but best not to try to convert them. # Upstream understands a None return as meaning it should not # use it for the target encoder. logger.debug( "Cannot convert to sRGB; color space = %s", (color_space.strip()), ) return None # Probably not possible to have an animated image with CMYK or GRAY icc # profile, but best leave it alone if we have one if getattr(img, "is_animated", False): return None if color_space == "GRAY": pil_mode = "L" else: pil_mode = "CMYK" logger.debug("Converting from %s to sRGB", color_space) transform = ImageCms.ImageCmsTransform( orig_profile, srgb_profile, pil_mode, "RGBA", intent=ImageCms.INTENT_RELATIVE_COLORIMETRIC, flags=TRANSFORM_FLAGS, ) src_im = Image.new(pil_mode, img.size, "white") src_im.paste(img) dst_im = Image.new("RGBA", img.size, "white") dst_im.info = img_info dst_im = transform.apply(src_im, dst_im) dst_im = dst_im.convert("RGB") dst_im.info = img_info return dst_im
Ensures that an image either has no ICC profile (and so is implicitly sRGB) or has an sRGB color profile. If the image is sRGB, it is returned unchanged. If it has a CMYK or Gray color profile, this function will return an image converted to sRGB. Any color profiles in other color spaces will return None.
57
203
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def ensure_srgb(img, srgb_profile=None): img_info = dict(img.info) icc = img_info.pop("icc_profile", None) if not icc: return img if ImageCms is None: raise RuntimeError("ImageCms is required for color profile utilities") if srgb_profile is not None: srgb_profile = ImageCms.ImageCmsProfile(srgb_profile) else: srgb_profile = DEFAULT_SRGB_PROFILE buf = BytesIO(icc) try: orig_profile = ImageCms.ImageCmsProfile(buf) color_space = orig_profile.profile.xcolor_space except (AttributeError, OSError, TypeError, ValueError): return None finally: buf.close() if color_space == "RGB ": logger.debug("Returning img (RGB)") return img if color_space not in ("GRAY", "CMYK"): # Other color spaces are rare, but best not to try to convert them. # Upstream understands a None return as meaning it should not # use it for the target encoder. logger.debug( "Cannot convert to sRGB; color space = %s", (color_space.strip()), ) return None # Probably not possible to have an animated image with CMYK or GRAY icc # profile, but best leave it alone if we have one if getattr(img, "is_animated", False): return None if color_space == "GRAY": pil_mode = "L" else: pil_mode = "CMYK" logger.debug("Converting from %s to sRGB", color_space) transform = ImageCms.ImageCmsTransform( orig_profile, srgb_profile, pil_mode, "RGBA", intent=ImageCms.INTENT_RELATIVE_COLORIMETRIC, flags=TRANSFORM_FLAGS, ) src_im = Image.new(pil_mode, img.size, "white") src_im.paste(img) dst_im = Image.new("RGBA", img.size, "white") dst_im.info = img_info dst_im = transform.apply(src_im, dst_im) dst_im = dst_im.convert("RGB") dst_im.info = img_info return dst_im ``` ###Assistant : Ensures that an image either has no ICC profile (and so is implicitly sRGB) or has an sRGB color profile. If the image is sRGB, it is returned unchanged. If it has a CMYK or Gray color profile, this function will return an image converted to sRGB. Any color profiles in other color spaces will return None.
864
def _cast_single_input(self, x): if self._should_cast_single_input(x): return tf.cast(x, self._compute_dtype_object) else: return x # _dtype used to be an attribute set in the constructor. We still expose it # because some clients still use it. # TODO(reedwm): Deprecate, then remove the _dtype property.
Cast a single Tensor or TensorSpec to the compute dtype.
10
41
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _cast_single_input(self, x): if self._should_cast_single_input(x): return tf.cast(x, self._compute_dtype_object) else: return x # _dtype used to be an attribute set in the constructor. We still expose it # because some clients still use it. # TODO(reedwm): Deprecate, then remove the _dtype property. ``` ###Assistant : Cast a single Tensor or TensorSpec to the compute dtype.
865
def queryset_in_batches(queryset): start_pk = 0 while True: qs = queryset.filter(pk__gt=start_pk)[:BATCH_SIZE] pks = list(qs.values_list("pk", flat=True)) if not pks: break yield pks start_pk = pks[-1]
Slice a queryset into batches. Input queryset should be sorted by pk.
12
23
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def queryset_in_batches(queryset): start_pk = 0 while True: qs = queryset.filter(pk__gt=start_pk)[:BATCH_SIZE] pks = list(qs.values_list("pk", flat=True)) if not pks: break yield pks start_pk = pks[-1] ``` ###Assistant : Slice a queryset into batches. Input queryset should be sorted by pk.
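The keyset-pagination idea can be shown without Django: the sketch below stands in a plain sorted list of primary keys for the queryset and assumes a small BATCH_SIZE (the real constant is defined elsewhere in the source).

```Python
BATCH_SIZE = 3  # assumed batch size for illustration

def ids_in_batches(sorted_pks):
    """Yield successive batches of pks, keyed on the last pk seen (keyset pagination)."""
    start_pk = 0
    while True:
        # Stand-in for queryset.filter(pk__gt=start_pk)[:BATCH_SIZE]
        batch = [pk for pk in sorted_pks if pk > start_pk][:BATCH_SIZE]
        if not batch:
            break
        yield batch
        start_pk = batch[-1]

print(list(ids_in_batches([1, 2, 5, 8, 9, 12, 20])))
# [[1, 2, 5], [8, 9, 12], [20]]
```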
866
def execute(): frappe.reload_doc("stock", "doctype", "purchase_receipt") frappe.reload_doc("stock", "doctype", "purchase_receipt_item") frappe.reload_doc("stock", "doctype", "delivery_note") frappe.reload_doc("stock", "doctype", "delivery_note_item") frappe.reload_doc("stock", "doctype", "stock_settings") def update_from_return_docs(doctype): for return_doc in frappe.get_all( doctype, filters={"is_return": 1, "docstatus": 1, "return_against": ("!=", "")} ): # Update original receipt/delivery document from return return_doc = frappe.get_cached_doc(doctype, return_doc.name) try: return_doc.update_prevdoc_status() except OverAllowanceError: frappe.db.rollback() continue return_against = frappe.get_doc(doctype, return_doc.return_against) return_against.update_billing_status() frappe.db.commit() # Set received qty in stock uom in PR, as returned qty is checked against it frappe.db.sql( ) for doctype in ("Purchase Receipt", "Delivery Note"): update_from_return_docs(doctype)
update `tabPurchase Receipt Item` set received_stock_qty = received_qty * conversion_factor where docstatus = 1
14
81
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def execute(): frappe.reload_doc("stock", "doctype", "purchase_receipt") frappe.reload_doc("stock", "doctype", "purchase_receipt_item") frappe.reload_doc("stock", "doctype", "delivery_note") frappe.reload_doc("stock", "doctype", "delivery_note_item") frappe.reload_doc("stock", "doctype", "stock_settings") def update_from_return_docs(doctype): for return_doc in frappe.get_all( doctype, filters={"is_return": 1, "docstatus": 1, "return_against": ("!=", "")} ): # Update original receipt/delivery document from return return_doc = frappe.get_cached_doc(doctype, return_doc.name) try: return_doc.update_prevdoc_status() except OverAllowanceError: frappe.db.rollback() continue return_against = frappe.get_doc(doctype, return_doc.return_against) return_against.update_billing_status() frappe.db.commit() # Set received qty in stock uom in PR, as returned qty is checked against it frappe.db.sql( ) for doctype in ("Purchase Receipt", "Delivery Note"): update_from_return_docs(doctype) ``` ###Assistant : update `tabPurchase Receipt Item` set received_stock_qty = received_qty * conversion_factor where docstatus = 1
867
def call_dex(self, other_args): parser = argparse.ArgumentParser( prog="dex", add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, description=, ) parser.add_argument( "-l", "--limit", dest="limit", type=check_positive, help="Number of records to display", default=15, ) parser.add_argument( "-s", "--sort", dest="sortby", nargs="+", help="Sort by given column. Default: Daily Volume [$]", default="Daily Volume [$]", ) ns_parser = self.parse_known_args_and_warn( parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED ) if ns_parser: dappradar_view.display_top_dexes( sortby=" ".join(ns_parser.sortby), limit=ns_parser.limit, export=ns_parser.export, )
Process dex command Shows top decentralized exchanges [Source: https://dappradar.com/] Accepts --sort {Name,Daily Users,Daily Volume [$]} to sort by column
19
55
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def call_dex(self, other_args): parser = argparse.ArgumentParser( prog="dex", add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, description=, ) parser.add_argument( "-l", "--limit", dest="limit", type=check_positive, help="Number of records to display", default=15, ) parser.add_argument( "-s", "--sort", dest="sortby", nargs="+", help="Sort by given column. Default: Daily Volume [$]", default="Daily Volume [$]", ) ns_parser = self.parse_known_args_and_warn( parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED ) if ns_parser: dappradar_view.display_top_dexes( sortby=" ".join(ns_parser.sortby), limit=ns_parser.limit, export=ns_parser.export, ) ``` ###Assistant : Process dex command Shows top decentralized exchanges [Source: https://dappradar.com/] Accepts --sort {Name,Daily Users,Daily Volume [$]} to sort by column
868
def get_backend_for_dir(self, location): # type: (str) -> Optional[VersionControl] vcs_backends = {} for vcs_backend in self._registry.values(): repo_path = vcs_backend.get_repository_root(location) if not repo_path: continue logger.debug('Determine that %s uses VCS: %s', location, vcs_backend.name) vcs_backends[repo_path] = vcs_backend if not vcs_backends: return None # Choose the VCS in the inner-most directory. Since all repository # roots found here would be either `location` or one of its # parents, the longest path should have the most path components, # i.e. the backend representing the inner-most repository. inner_most_repo_path = max(vcs_backends, key=len) return vcs_backends[inner_most_repo_path]
Return a VersionControl object if a repository of that type is found at the given directory.
16
86
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_backend_for_dir(self, location): # type: (str) -> Optional[VersionControl] vcs_backends = {} for vcs_backend in self._registry.values(): repo_path = vcs_backend.get_repository_root(location) if not repo_path: continue logger.debug('Determine that %s uses VCS: %s', location, vcs_backend.name) vcs_backends[repo_path] = vcs_backend if not vcs_backends: return None # Choose the VCS in the inner-most directory. Since all repository # roots found here would be either `location` or one of its # parents, the longest path should have the most path components, # i.e. the backend representing the inner-most repository. inner_most_repo_path = max(vcs_backends, key=len) return vcs_backends[inner_most_repo_path] ``` ###Assistant : Return a VersionControl object if a repository of that type is found at the given directory.
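The tie-breaking rule ("inner-most repository wins") reduces to taking the longest candidate root path. A tiny sketch with made-up paths, not taken from the source:

```Python
# Candidate repository roots mapped to the VCS that claimed them (made-up data).
candidate_roots = {
    "/home/user/project": "git",
    "/home/user/project/vendor/lib": "hg",
}

# The longest path has the most components, i.e. it is the inner-most repository.
inner_most = max(candidate_roots, key=len)
print(inner_most, candidate_roots[inner_most])  # /home/user/project/vendor/lib hg
```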
869
def has_access(self, action_name, resource_name, user=None) -> bool: if not user: user = g.user if user.is_anonymous: user.roles = self.get_user_roles(user) has_access = self._has_access(user, action_name, resource_name) # FAB built-in view access method. Won't work for AllDag access. if self.is_dag_resource(resource_name): if action_name == permissions.ACTION_CAN_READ: has_access |= self.can_read_dag(resource_name, user) elif action_name == permissions.ACTION_CAN_EDIT: has_access |= self.can_edit_dag(resource_name, user) return has_access
Verify whether a given user could perform a certain action (e.g can_read, can_write) on the given resource. :param action_name: action_name on resource (e.g can_read, can_edit). :param resource_name: name of view-menu or resource. :param user: user name :return: Whether user could perform certain action on the resource. :rtype bool
48
54
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def has_access(self, action_name, resource_name, user=None) -> bool: if not user: user = g.user if user.is_anonymous: user.roles = self.get_user_roles(user) has_access = self._has_access(user, action_name, resource_name) # FAB built-in view access method. Won't work for AllDag access. if self.is_dag_resource(resource_name): if action_name == permissions.ACTION_CAN_READ: has_access |= self.can_read_dag(resource_name, user) elif action_name == permissions.ACTION_CAN_EDIT: has_access |= self.can_edit_dag(resource_name, user) return has_access ``` ###Assistant : Verify whether a given user could perform a certain action (e.g can_read, can_write) on the given resource. :param action_name: action_name on resource (e.g can_read, can_edit). :param resource_name: name of view-menu or resource. :param user: user name :return: Whether user could perform certain action on the resource. :rtype bool
870
def has_refs(self) -> bool: return len(self._session_report_run_counts) > 0
True if this Entry has references from any AppSession. If not, it can be removed from the cache.
18
8
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def has_refs(self) -> bool: return len(self._session_report_run_counts) > 0 ``` ###Assistant : True if this Entry has references from any AppSession. If not, it can be removed from the cache.
871
def _reshape_to_record_metrics(self, batch, losses, num_target_tokens, indices): val_id_shape = batch.valid_indices.shape reshaped_losses = torch.zeros( val_id_shape, device=losses.device, dtype=losses.dtype ) reshaped_num_target_tokens = torch.zeros( val_id_shape, device=num_target_tokens.device, dtype=num_target_tokens.dtype ) reshaped_losses[indices] = losses reshaped_num_target_tokens[indices] = num_target_tokens return (reshaped_losses, reshaped_num_target_tokens)
MultitaskAgent shuffles and combines examples from both classifier and the generator tasks in a single batch. We compute losses only for those exs in the batch resulting in losses and num_target_tokens vectors that are smaller than the. This method reshapes the losses and num_target_tokens vectors back to the batch size. This is needed to record local metrics as the metrics need to be of batch size. Args: batch: batch being processed in this iteration. losses: classifier or generator loss vector (shape: b' X 1), where b' <= b. num_target_tokens: number of tokens in each examples for classification or generation tasks. (shape: b' X 1), where b' <= b. indices: indices of (either classification or generation) exs for which the loss was computed. Returns: A tuple of reshaped losses and num_target_tokens, both of shape: b X 1.
136
32
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _reshape_to_record_metrics(self, batch, losses, num_target_tokens, indices): val_id_shape = batch.valid_indices.shape reshaped_losses = torch.zeros( val_id_shape, device=losses.device, dtype=losses.dtype ) reshaped_num_target_tokens = torch.zeros( val_id_shape, device=num_target_tokens.device, dtype=num_target_tokens.dtype ) reshaped_losses[indices] = losses reshaped_num_target_tokens[indices] = num_target_tokens return (reshaped_losses, reshaped_num_target_tokens) ``` ###Assistant : MultitaskAgent shuffles and combines examples from both classifier and the generator tasks in a single batch. We compute losses only for those exs in the batch resulting in losses and num_target_tokens vectors that are smaller than the batch size. This method reshapes the losses and num_target_tokens vectors back to the batch size. This is needed to record local metrics as the metrics need to be of batch size. Args: batch: batch being processed in this iteration. losses: classifier or generator loss vector (shape: b' X 1), where b' <= b. num_target_tokens: number of tokens in each example for classification or generation tasks. (shape: b' X 1), where b' <= b. indices: indices of (either classification or generation) exs for which the loss was computed. Returns: A tuple of reshaped losses and num_target_tokens, both of shape: b X 1.
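A minimal torch sketch of the scatter-into-zeros step described above, with made-up sizes and values:

```Python
import torch

batch_size = 5
indices = torch.tensor([0, 2, 4])        # exs for which a loss was computed
losses = torch.tensor([0.3, 0.7, 0.1])   # shape: b' (here 3), with b' <= b

# Allocate a full-batch-sized vector of zeros and scatter the computed losses into it.
full_losses = torch.zeros(batch_size, dtype=losses.dtype)
full_losses[indices] = losses
print(full_losses)  # tensor([0.3000, 0.0000, 0.7000, 0.0000, 0.1000])
```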
872
def add_support(self, location, type): if location not in self._node_labels: raise ValueError("Support must be added on a known node") else: self._supports[location] = type
This method adds a pinned or roller support at a particular node Parameters ========== location: String or Symbol Label of the Node at which support is added. type: String Type of the support being provided at the node. Examples ======== >>> from sympy.physics.continuum_mechanics.truss import Truss >>> from sympy import symbols >>> t = Truss() >>> t.add_node('A', 0, 0) >>> t.add_node('B', 3, 0) >>> t.add_support('A', 'pinned') >>> t.supports {'A': 'pinned', 'B': 'none'}
71
22
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def add_support(self, location, type): if location not in self._node_labels: raise ValueError("Support must be added on a known node") else: self._supports[location] = type ``` ###Assistant : This method adds a pinned or roller support at a particular node Parameters ========== location: String or Symbol Label of the Node at which support is added. type: String Type of the support being provided at the node. Examples ======== >>> from sympy.physics.continuum_mechanics.truss import Truss >>> from sympy import symbols >>> t = Truss() >>> t.add_node('A', 0, 0) >>> t.add_node('B', 3, 0) >>> t.add_support('A', 'pinned') >>> t.supports {'A': 'pinned', 'B': 'none'}
873
def makeport(self): sock = socket.create_server(("", 0), family=self.af, backlog=1) port = sock.getsockname()[1] # Get proper port host = self.sock.getsockname()[0] # Get proper host if self.af == socket.AF_INET: resp = self.sendport(host, port) else: resp = self.sendeprt(host, port) if self.timeout is not _GLOBAL_DEFAULT_TIMEOUT: sock.settimeout(self.timeout) return sock
Create a new socket and send a PORT command for it.
11
43
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def makeport(self): sock = socket.create_server(("", 0), family=self.af, backlog=1) port = sock.getsockname()[1] # Get proper port host = self.sock.getsockname()[0] # Get proper host if self.af == socket.AF_INET: resp = self.sendport(host, port) else: resp = self.sendeprt(host, port) if self.timeout is not _GLOBAL_DEFAULT_TIMEOUT: sock.settimeout(self.timeout) return sock ``` ###Assistant : Create a new socket and send a PORT command for it.
874
def test_all(gm_manager): _save_script(test_gm_script, 'test.user.js') gm_manager.load_scripts() assert (gm_manager.all_scripts()[0].name == "qutebrowser test userscript") @pytest.mark.parametrize("url, expected_matches", [ # included ('http://trolol.com/', 1), # neither included nor excluded ('http://aaaaaaaaaa.com/', 0), # excluded ('https://badhost.xxx/', 0), ])
Test that a script gets read from file, parsed and returned.
11
30
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_all(gm_manager): _save_script(test_gm_script, 'test.user.js') gm_manager.load_scripts() assert (gm_manager.all_scripts()[0].name == "qutebrowser test userscript") @pytest.mark.parametrize("url, expected_matches", [ # included ('http://trolol.com/', 1), # neither included nor excluded ('http://aaaaaaaaaa.com/', 0), # excluded ('https://badhost.xxx/', 0), ]) ``` ###Assistant : Test that a script gets read from file, parsed and returned.
875
def split_path_msys(path): if path.startswith(('/', '\\')) and not path.startswith(('//', '\\\\')): global msysroot if not msysroot: msysroot = subprocess.check_output(['cygpath', '-w', '/']).decode(sys.stdout.encoding or 'latin-1') msysroot = msysroot.strip() path = os.path.normpath(msysroot + os.sep + path) return split_path_win32(path) if sys.platform == 'cygwin': split_path = split_path_cygwin elif is_win32: if os.environ.get('MSYSTEM') and sys.executable.startswith('/'): split_path = split_path_msys else: split_path = split_path_win32 else: split_path = split_path_unix split_path.__doc__ =
Splits a path by / or \\; do not confuse this function with ``os.path.split`` :type path: string :param path: path to split :return: list of string
27
59
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def split_path_msys(path): if path.startswith(('/', '\\')) and not path.startswith(('//', '\\\\')): global msysroot if not msysroot: msysroot = subprocess.check_output(['cygpath', '-w', '/']).decode(sys.stdout.encoding or 'latin-1') msysroot = msysroot.strip() path = os.path.normpath(msysroot + os.sep + path) return split_path_win32(path) if sys.platform == 'cygwin': split_path = split_path_cygwin elif is_win32: if os.environ.get('MSYSTEM') and sys.executable.startswith('/'): split_path = split_path_msys else: split_path = split_path_win32 else: split_path = split_path_unix split_path.__doc__ = ``` ###Assistant : Splits a path by / or \\; do not confuse this function with ``os.path.split`` :type path: string :param path: path to split :return: list of string
876
def find_image_duplicates(image, user, permission_policy): instances = permission_policy.instances_user_has_permission_for(user, "choose") return instances.exclude(pk=image.pk).filter(file_hash=image.file_hash)
Finds all the duplicates of a given image. To keep things simple, two images are considered to be duplicates if they have the same `file_hash` value. This function also ensures that the `user` can choose one of the duplicate images returned (if any).
43
10
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def find_image_duplicates(image, user, permission_policy): instances = permission_policy.instances_user_has_permission_for(user, "choose") return instances.exclude(pk=image.pk).filter(file_hash=image.file_hash) ``` ###Assistant : Finds all the duplicates of a given image. To keep things simple, two images are considered to be duplicates if they have the same `file_hash` value. This function also ensures that the `user` can choose one of the duplicate images returned (if any).
877
def _trim_arity(func, maxargs=2): global _trim_arity_call_line if func in _single_arg_builtins: return lambda s, l, t: func(t) limit = 0 found_arity = False
decorator to trim function calls to match the arity of the target
12
21
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _trim_arity(func, maxargs=2): global _trim_arity_call_line if func in _single_arg_builtins: return lambda s, l, t: func(t) limit = 0 found_arity = False ``` ###Assistant : decorator to trim function calls to match the arity of the target
878
def call_exmarkets(self, other_args): parser = argparse.ArgumentParser( prog="exmarkets", add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, description=, ) parser.add_argument( "-e", "--exchange", help="Identifier of exchange e.g for Binance Exchange -> binance", dest="exchange", default="binance", type=str, ) parser.add_argument( "-l", "--limit", dest="limit", type=check_positive, help="display N number records", default=10, ) parser.add_argument( "-s", "--sortby", dest="sortby", type=str, help="Sort by given column. Default: reported_volume_24h_share", default="reported_volume_24h_share", choices=coinpaprika_model.EXMARKETS_FILTERS, ) parser.add_argument( "--descend", action="store_false", help="Flag to sort in descending order (lowest first)", dest="descend", default=False, ) parser.add_argument( "-u", "--urls", dest="urls", action="store_true", help=, default=False, ) if other_args and "-" not in other_args[0][0]: other_args.insert(0, "-e") ns_parser = self.parse_known_args_and_warn( parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED ) if ns_parser: coinpaprika_view.display_exchange_markets( exchange=ns_parser.exchange, limit=ns_parser.limit, export=ns_parser.export, sortby=ns_parser.sortby, ascend=not ns_parser.descend, links=ns_parser.urls, )
Process exmarkets commandGet all exchange markets found for given exchange You can display only N number of records with --limit parameter. You can sort data by pair, base_currency_name, quote_currency_name, market_url, category, reported_volume_24h_share, trust_score --sortby parameter and also with --descend flag to sort descending. You can use additional flag --urls to see urls for each market Displays: exchange_id, pair, base_currency_name, quote_currency_name, market_url, category, reported_volume_24h_share, trust_score,Flag to show urls. If you will use that flag you will see only: exchange, pair, trust_score, market_url columns
82
101
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def call_exmarkets(self, other_args): parser = argparse.ArgumentParser( prog="exmarkets", add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, description=, ) parser.add_argument( "-e", "--exchange", help="Identifier of exchange e.g for Binance Exchange -> binance", dest="exchange", default="binance", type=str, ) parser.add_argument( "-l", "--limit", dest="limit", type=check_positive, help="display N number records", default=10, ) parser.add_argument( "-s", "--sortby", dest="sortby", type=str, help="Sort by given column. Default: reported_volume_24h_share", default="reported_volume_24h_share", choices=coinpaprika_model.EXMARKETS_FILTERS, ) parser.add_argument( "--descend", action="store_false", help="Flag to sort in descending order (lowest first)", dest="descend", default=False, ) parser.add_argument( "-u", "--urls", dest="urls", action="store_true", help=, default=False, ) if other_args and "-" not in other_args[0][0]: other_args.insert(0, "-e") ns_parser = self.parse_known_args_and_warn( parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED ) if ns_parser: coinpaprika_view.display_exchange_markets( exchange=ns_parser.exchange, limit=ns_parser.limit, export=ns_parser.export, sortby=ns_parser.sortby, ascend=not ns_parser.descend, links=ns_parser.urls, ) ``` ###Assistant : Process exmarkets command. Get all exchange markets found for given exchange. You can display only N number of records with --limit parameter. You can sort data by pair, base_currency_name, quote_currency_name, market_url, category, reported_volume_24h_share, trust_score --sortby parameter and also with --descend flag to sort descending. You can use additional flag --urls to see urls for each market. Displays: exchange_id, pair, base_currency_name, quote_currency_name, market_url, category, reported_volume_24h_share, trust_score. Flag to show urls. If you will use that flag you will see only: exchange, pair, trust_score, market_url columns
879
async def test_unload_config_entry(hass, entry, lcn_connection): await hass.config_entries.async_unload(entry.entry_id) assert hass.states.get("cover.cover_outputs").state == STATE_UNAVAILABLE assert hass.states.get("cover.cover_relays").state == STATE_UNAVAILABLE
Test the cover is removed when the config entry is unloaded.
11
15
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_unload_config_entry(hass, entry, lcn_connection): await hass.config_entries.async_unload(entry.entry_id) assert hass.states.get("cover.cover_outputs").state == STATE_UNAVAILABLE assert hass.states.get("cover.cover_relays").state == STATE_UNAVAILABLE ``` ###Assistant : Test the cover is removed when the config entry is unloaded.
880
def topk(self, k, axis=-1, split_every=None): from dask.array.reductions import topk return topk(self, k, axis=axis, split_every=split_every)
The top k elements of an array. See :func:`dask.array.topk` for docstring.
11
14
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def topk(self, k, axis=-1, split_every=None): from dask.array.reductions import topk return topk(self, k, axis=axis, split_every=split_every) ``` ###Assistant : The top k elements of an array. See :func:`dask.array.topk` for docstring.
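A hedged usage sketch of the delegated `dask.array.topk`, assuming a standard dask installation; the array values are made up.

```Python
import numpy as np
import dask.array as da

x = da.from_array(np.array([5, 1, 9, 3, 7]), chunks=2)
top3 = x.topk(3)          # lazily selects the 3 largest values
print(top3.compute())     # [9 7 5], sorted largest first
```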
881
def for_each_ternary(self, fn, selector=None, row=None, col=None) -> "Figure": for obj in self.select_ternaries(selector=selector, row=row, col=col): fn(obj) return self
Apply a function to all ternary objects that satisfy the specified selection criteria Parameters ---------- fn: Function that inputs a single ternary object. selector: dict, function, or None (default None) Dict to use as selection criteria. ternary objects will be selected if they contain properties corresponding to all of the dictionary's keys, with values that exactly match the supplied values. If None (the default), all ternary objects are selected. If a function, it must be a function accepting a single argument and returning a boolean. The function will be called on each ternary and those for which the function returned True will be in the selection. row, col: int or None (default None) Subplot row and column index of ternary objects to select. To select ternary objects by row and column, the Figure must have been created using plotly.subplots.make_subplots. If None (the default), all ternary objects are selected. Returns ------- self Returns the Figure object that the method was called on
161
17
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def for_each_ternary(self, fn, selector=None, row=None, col=None) -> "Figure": for obj in self.select_ternaries(selector=selector, row=row, col=col): fn(obj) return self ``` ###Assistant : Apply a function to all ternary objects that satisfy the specified selection criteria Parameters ---------- fn: Function that inputs a single ternary object. selector: dict, function, or None (default None) Dict to use as selection criteria. ternary objects will be selected if they contain properties corresponding to all of the dictionary's keys, with values that exactly match the supplied values. If None (the default), all ternary objects are selected. If a function, it must be a function accepting a single argument and returning a boolean. The function will be called on each ternary and those for which the function returned True will be in the selection. row, col: int or None (default None) Subplot row and column index of ternary objects to select. To select ternary objects by row and column, the Figure must have been created using plotly.subplots.make_subplots. If None (the default), all ternary objects are selected. Returns ------- self Returns the Figure object that the method was called on
882
def call(self, *args, **kwargs): warnings.warn( "'call()' method is deprecated. " + "Use '__call__()' instead", DeprecationWarning, ) return self.__call__(*args, **kwargs)
Use ``__call__`` instead because this method is deprecated.
8
19
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def call(self, *args, **kwargs): warnings.warn( "'call()' method is deprecated. " + "Use '__call__()' instead", DeprecationWarning, ) return self.__call__(*args, **kwargs) ``` ###Assistant : Use ``__call__`` instead because this method is deprecated.
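The same deprecation-alias pattern can be shown on a self-contained class; the `Greeter` class below is made up for illustration.

```Python
import warnings

class Greeter:
    def __call__(self, name):
        return f"hello {name}"

    def call(self, *args, **kwargs):
        # Deprecated alias kept for backwards compatibility, as in the method above.
        warnings.warn(
            "'call()' method is deprecated. Use '__call__()' instead",
            DeprecationWarning,
        )
        return self.__call__(*args, **kwargs)

g = Greeter()
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    print(g.call("world"))        # hello world
    print(caught[0].category)     # <class 'DeprecationWarning'>
```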
883
def validate_per_replica_inputs(distribution_strategy, x): # Convert the inputs and targets into a list of PerReplica objects. per_replica_list = tf.nest.flatten(x) x_values_list = [] for x in per_replica_list: # At this point x should contain only tensors. x_values = distribution_strategy.unwrap(x) for value in x_values: if not tf.is_tensor(value): raise ValueError('Dataset input to the model should be tensors instead ' 'they are of type {}'.format(type(value))) if not tf.executing_eagerly(): # Validate that the shape and dtype of all the elements in x are the same. validate_all_tensor_shapes(x, x_values) validate_all_tensor_types(x, x_values) x_values_list.append(x_values[0]) return x_values_list
Validates PerReplica dataset input list. Args: distribution_strategy: The current DistributionStrategy used to call `fit`, `evaluate` and `predict`. x: A list of PerReplica objects that represent the input or target values. Returns: List containing the first element of each of the PerReplica objects in the input list. Raises: ValueError: If any of the objects in the `per_replica_list` is not a tensor.
60
86
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def validate_per_replica_inputs(distribution_strategy, x): # Convert the inputs and targets into a list of PerReplica objects. per_replica_list = tf.nest.flatten(x) x_values_list = [] for x in per_replica_list: # At this point x should contain only tensors. x_values = distribution_strategy.unwrap(x) for value in x_values: if not tf.is_tensor(value): raise ValueError('Dataset input to the model should be tensors instead ' 'they are of type {}'.format(type(value))) if not tf.executing_eagerly(): # Validate that the shape and dtype of all the elements in x are the same. validate_all_tensor_shapes(x, x_values) validate_all_tensor_types(x, x_values) x_values_list.append(x_values[0]) return x_values_list ``` ###Assistant : Validates PerReplica dataset input list. Args: distribution_strategy: The current DistributionStrategy used to call `fit`, `evaluate` and `predict`. x: A list of PerReplica objects that represent the input or target values. Returns: List containing the first element of each of the PerReplica objects in the input list. Raises: ValueError: If any of the objects in the `per_replica_list` is not a tensor.
884
def get_mac_addr(self): if ( self.bulb.host_firmware_version and AwesomeVersion(self.bulb.host_firmware_version) >= FIX_MAC_FW ): octets = [int(octet, 16) for octet in self.mac_addr.split(":")] octets[5] = (octets[5] + 1) % 256 return ":".join(f"{octet:02x}" for octet in octets) return self.mac_addr
Increment the last byte of the mac address by one for FW>3.70.
12
33
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_mac_addr(self): if ( self.bulb.host_firmware_version and AwesomeVersion(self.bulb.host_firmware_version) >= FIX_MAC_FW ): octets = [int(octet, 16) for octet in self.mac_addr.split(":")] octets[5] = (octets[5] + 1) % 256 return ":".join(f"{octet:02x}" for octet in octets) return self.mac_addr ``` ###Assistant : Increment the last byte of the mac address by one for FW>3.70.
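The octet arithmetic in isolation, using an example MAC string that is not from the source; note the wrap-around at 0xff.

```Python
mac_addr = "d0:73:d5:01:02:ff"  # example address for illustration only

octets = [int(octet, 16) for octet in mac_addr.split(":")]
octets[5] = (octets[5] + 1) % 256          # increment last byte, wrapping 0xff -> 0x00
print(":".join(f"{octet:02x}" for octet in octets))  # d0:73:d5:01:02:00
```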
885
def test_positive_integer_or_none_4(): assert positive_integer_or_none('none') is None assert positive_integer_or_none('None') is None
Assert that the TPOT CLI interface's positive_integer_or_none parsing return None when value is string 'None' or 'none'.
17
10
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_positive_integer_or_none_4(): assert positive_integer_or_none('none') is None assert positive_integer_or_none('None') is None ``` ###Assistant : Assert that the TPOT CLI interface's positive_integer_or_none parsing return None when value is string 'None' or 'none'.
886
def get_local_part(value): local_part = LocalPart() leader = None if value[0] in CFWS_LEADER: leader, value = get_cfws(value) if not value: raise errors.HeaderParseError( "expected local-part but found '{}'".format(value)) try: token, value = get_dot_atom(value) except errors.HeaderParseError: try: token, value = get_word(value) except errors.HeaderParseError: if value[0] != '\\' and value[0] in PHRASE_ENDS: raise token = TokenList() if leader is not None: token[:0] = [leader] local_part.append(token) if value and (value[0]=='\\' or value[0] not in PHRASE_ENDS): obs_local_part, value = get_obs_local_part(str(local_part) + value) if obs_local_part.token_type == 'invalid-obs-local-part': local_part.defects.append(errors.InvalidHeaderDefect( "local-part is not dot-atom, quoted-string, or obs-local-part")) else: local_part.defects.append(errors.ObsoleteHeaderDefect( "local-part is not a dot-atom (contains CFWS)")) local_part[0] = obs_local_part try: local_part.value.encode('ascii') except UnicodeEncodeError: local_part.defects.append(errors.NonASCIILocalPartDefect( "local-part contains non-ASCII characters)")) return local_part, value
local-part = dot-atom / quoted-string / obs-local-part
7
112
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_local_part(value): local_part = LocalPart() leader = None if value[0] in CFWS_LEADER: leader, value = get_cfws(value) if not value: raise errors.HeaderParseError( "expected local-part but found '{}'".format(value)) try: token, value = get_dot_atom(value) except errors.HeaderParseError: try: token, value = get_word(value) except errors.HeaderParseError: if value[0] != '\\' and value[0] in PHRASE_ENDS: raise token = TokenList() if leader is not None: token[:0] = [leader] local_part.append(token) if value and (value[0]=='\\' or value[0] not in PHRASE_ENDS): obs_local_part, value = get_obs_local_part(str(local_part) + value) if obs_local_part.token_type == 'invalid-obs-local-part': local_part.defects.append(errors.InvalidHeaderDefect( "local-part is not dot-atom, quoted-string, or obs-local-part")) else: local_part.defects.append(errors.ObsoleteHeaderDefect( "local-part is not a dot-atom (contains CFWS)")) local_part[0] = obs_local_part try: local_part.value.encode('ascii') except UnicodeEncodeError: local_part.defects.append(errors.NonASCIILocalPartDefect( "local-part contains non-ASCII characters)")) return local_part, value ``` ###Assistant : local-part = dot-atom / quoted-string / obs-local-part
887
def prepare_video_inputs(feature_extract_tester, equal_resolution=False, numpify=False, torchify=False): assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" video_inputs = [] for i in range(feature_extract_tester.batch_size): if equal_resolution: width = height = feature_extract_tester.max_resolution else: width, height = np.random.choice( np.arange(feature_extract_tester.min_resolution, feature_extract_tester.max_resolution), 2 ) video = prepare_video( feature_extract_tester=feature_extract_tester, width=width, height=height, numpify=numpify, torchify=torchify, ) video_inputs.append(video) return video_inputs
This function prepares a batch of videos: a list of list of PIL images, or a list of list of numpy arrays if one specifies numpify=True, or a list of list of PyTorch tensors if one specifies torchify=True. One can specify whether the videos are of the same resolution or not.
51
57
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def prepare_video_inputs(feature_extract_tester, equal_resolution=False, numpify=False, torchify=False): assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" video_inputs = [] for i in range(feature_extract_tester.batch_size): if equal_resolution: width = height = feature_extract_tester.max_resolution else: width, height = np.random.choice( np.arange(feature_extract_tester.min_resolution, feature_extract_tester.max_resolution), 2 ) video = prepare_video( feature_extract_tester=feature_extract_tester, width=width, height=height, numpify=numpify, torchify=torchify, ) video_inputs.append(video) return video_inputs ``` ###Assistant : This function prepares a batch of videos: a list of list of PIL images, or a list of list of numpy arrays if one specifies numpify=True, or a list of list of PyTorch tensors if one specifies torchify=True. One can specify whether the videos are of the same resolution or not.
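A self-contained sketch of the kind of batch the helper above produces when numpify=True; the standalone function, sizes, and names here are illustrative and not taken from the original test suite.
```Python
import numpy as np

def make_random_videos(batch_size=2, num_frames=4, min_res=30, max_res=40):
    # Each video is a list of frames; each frame is an (H, W, 3) uint8 array.
    videos = []
    for _ in range(batch_size):
        width, height = np.random.randint(min_res, max_res, size=2)
        videos.append([
            np.random.randint(0, 256, size=(height, width, 3), dtype=np.uint8)
            for _ in range(num_frames)
        ])
    return videos

batch = make_random_videos()
print(len(batch), len(batch[0]), batch[0][0].shape)  # e.g. 2 4 (37, 33, 3)
```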
888
def _verify_no_matching_http_header(self, ssl_vhost, header_substring): header_path = self.parser.find_dir("Header", None, start=ssl_vhost.path) if header_path: # "Existing Header directive for virtualhost" pat = '(?:[ "]|^)(%s)(?:[ "]|$)' % (header_substring.lower()) for match in header_path: if re.search(pat, self.parser.aug.get(match).lower()): raise errors.PluginEnhancementAlreadyPresent( "Existing %s header" % header_substring)
Checks to see if there is an existing Header directive that contains the string header_substring. :param ssl_vhost: vhost to check :type vhost: :class:`~certbot_apache._internal.obj.VirtualHost` :param header_substring: string that uniquely identifies a header. e.g: Strict-Transport-Security, Upgrade-Insecure-Requests. :type str :returns: boolean :rtype: (bool) :raises errors.PluginEnhancementAlreadyPresent When header header_substring exists
46
38
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _verify_no_matching_http_header(self, ssl_vhost, header_substring): header_path = self.parser.find_dir("Header", None, start=ssl_vhost.path) if header_path: # "Existing Header directive for virtualhost" pat = '(?:[ "]|^)(%s)(?:[ "]|$)' % (header_substring.lower()) for match in header_path: if re.search(pat, self.parser.aug.get(match).lower()): raise errors.PluginEnhancementAlreadyPresent( "Existing %s header" % header_substring) ``` ###Assistant : Checks to see if there is an existing Header directive that contains the string header_substring. :param ssl_vhost: vhost to check :type vhost: :class:`~certbot_apache._internal.obj.VirtualHost` :param header_substring: string that uniquely identifies a header. e.g: Strict-Transport-Security, Upgrade-Insecure-Requests. :type str :returns: boolean :rtype: (bool) :raises errors.PluginEnhancementAlreadyPresent When header header_substring exists
889
def indices(dimensions, dtype=int32, sparse=False): dimensions = tuple( core.concrete_or_error(operator.index, d, "dimensions argument of jnp.indices") for d in dimensions) N = len(dimensions) output = [] s = dimensions for i, dim in enumerate(dimensions): idx = lax.iota(dtype, dim) if sparse: s = (1,)*i + (dim,) + (1,)*(N - i - 1) output.append(lax.broadcast_in_dim(idx, s, (i,))) if sparse: return tuple(output) return stack(output, 0) if output else array([], dtype=dtype) _TOTAL_REPEAT_LENGTH_DOC = @_wraps(np.repeat, lax_description=_TOTAL_REPEAT_LENGTH_DOC)
\ Jax adds the optional `total_repeat_length` parameter which specifies the total number of repeat, and defaults to sum(repeats). It must be specified for repeat to be compilable. If `sum(repeats)` is larger than the specified `total_repeat_length` the remaining values will be discarded. In the case of `sum(repeats)` being smaller than the specified target length, the final value will be repeated.
59
67
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def indices(dimensions, dtype=int32, sparse=False): dimensions = tuple( core.concrete_or_error(operator.index, d, "dimensions argument of jnp.indices") for d in dimensions) N = len(dimensions) output = [] s = dimensions for i, dim in enumerate(dimensions): idx = lax.iota(dtype, dim) if sparse: s = (1,)*i + (dim,) + (1,)*(N - i - 1) output.append(lax.broadcast_in_dim(idx, s, (i,))) if sparse: return tuple(output) return stack(output, 0) if output else array([], dtype=dtype) _TOTAL_REPEAT_LENGTH_DOC = @_wraps(np.repeat, lax_description=_TOTAL_REPEAT_LENGTH_DOC) ``` ###Assistant : \ Jax adds the optional `total_repeat_length` parameter which specifies the total number of repeat, and defaults to sum(repeats). It must be specified for repeat to be compilable. If `sum(repeats)` is larger than the specified `total_repeat_length` the remaining values will be discarded. In the case of `sum(repeats)` being smaller than the specified target length, the final value will be repeated.
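A brief usage sketch for the entry above (assumes jax is installed); the second half illustrates the total_repeat_length behaviour the docstring describes.
```Python
import jax.numpy as jnp

dense = jnp.indices((2, 3))                  # stacked coordinate grids, shape (2, 2, 3)
sparse = jnp.indices((2, 3), sparse=True)    # tuple of broadcastable arrays
print(dense.shape)                           # (2, 2, 3)
print([a.shape for a in sparse])             # [(2, 1), (1, 3)]

# total_repeat_length truncates when sum(repeats) exceeds it ...
print(jnp.repeat(jnp.array([1, 2]), repeats=2, total_repeat_length=3))  # [1 1 2]
# ... and repeats the final value when sum(repeats) falls short of it.
print(jnp.repeat(jnp.array([1, 2]), repeats=1, total_repeat_length=4))  # [1 2 2 2]
```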
890
def test_open_connection(tctx): assert Playbook(tcp.TCPLayer(tctx, True)) << OpenConnection(tctx.server) tctx.server.timestamp_start = 1624544785 assert Playbook(tcp.TCPLayer(tctx, True)) << None
If there is no server connection yet, establish one, because the server may send data first.
16
15
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_open_connection(tctx): assert Playbook(tcp.TCPLayer(tctx, True)) << OpenConnection(tctx.server) tctx.server.timestamp_start = 1624544785 assert Playbook(tcp.TCPLayer(tctx, True)) << None ``` ###Assistant : If there is no server connection yet, establish one, because the server may send data first.
891
def get_connected_endpoints(self, obj): endpoints = obj.connected_endpoints if endpoints: serializer = get_serializer_for_model(endpoints[0], prefix='Nested') context = {'request': self.context['request']} return serializer(endpoints, many=True, context=context).data
Return the appropriate serializer for the type of connected object.
10
20
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_connected_endpoints(self, obj): endpoints = obj.connected_endpoints if endpoints: serializer = get_serializer_for_model(endpoints[0], prefix='Nested') context = {'request': self.context['request']} return serializer(endpoints, many=True, context=context).data ``` ###Assistant : Return the appropriate serializer for the type of connected object.
892
def test_basic(self): trees = [(nx.full_rary_tree(2, 2**2 - 1), 0) for i in range(2)] actual = nx.join(trees) expected = nx.full_rary_tree(2, 2**3 - 1) assert nx.is_isomorphic(actual, expected)
Tests for joining multiple subtrees at a root node.
9
25
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_basic(self): trees = [(nx.full_rary_tree(2, 2**2 - 1), 0) for i in range(2)] actual = nx.join(trees) expected = nx.full_rary_tree(2, 2**3 - 1) assert nx.is_isomorphic(actual, expected) ``` ###Assistant : Tests for joining multiple subtrees at a root node.
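A small sketch of the operation this test exercises (assumes a networkx version that still exposes nx.join; newer releases rename it to join_trees).
```Python
import networkx as nx

# Two full binary trees with 3 nodes each, both rooted at node 0.
trees = [(nx.full_rary_tree(2, 2**2 - 1), 0) for _ in range(2)]

joined = nx.join(trees)  # fresh root with each input tree attached beneath it
print(joined.number_of_nodes())                            # 7 == 2**3 - 1
print(nx.is_isomorphic(joined, nx.full_rary_tree(2, 7)))   # True
```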
893
def test_result_list_html(self): new_parent = Parent.objects.create(name="parent") new_child = Child.objects.create(name="name", parent=new_parent) request = self.factory.get("/child/") request.user = self.superuser m = ChildAdmin(Child, custom_site) cl = m.get_changelist_instance(request) cl.formset = None template = Template( "{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}" ) context = Context({"cl": cl, "opts": Child._meta}) table_output = template.render(context) link = reverse("admin:admin_changelist_child_change", args=(new_child.id,)) row_html = build_tbody_html( new_child.id, link, '<td class="field-parent nowrap">%s</td>' % new_parent ) self.assertNotEqual( table_output.find(row_html), -1, "Failed to find expected row element: %s" % table_output, )
Inclusion tag result_list generates a table when with default ModelAdmin settings.
11
77
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_result_list_html(self): new_parent = Parent.objects.create(name="parent") new_child = Child.objects.create(name="name", parent=new_parent) request = self.factory.get("/child/") request.user = self.superuser m = ChildAdmin(Child, custom_site) cl = m.get_changelist_instance(request) cl.formset = None template = Template( "{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}" ) context = Context({"cl": cl, "opts": Child._meta}) table_output = template.render(context) link = reverse("admin:admin_changelist_child_change", args=(new_child.id,)) row_html = build_tbody_html( new_child.id, link, '<td class="field-parent nowrap">%s</td>' % new_parent ) self.assertNotEqual( table_output.find(row_html), -1, "Failed to find expected row element: %s" % table_output, ) ``` ###Assistant : Inclusion tag result_list generates a table when with default ModelAdmin settings.
894
def test_delete_uploaded_image(self): # Send request response = self.client.post( reverse( "wagtailimages:delete_upload_multiple", args=(self.uploaded_image.id,) ) ) # Check response self.assertEqual(response.status_code, 200) self.assertEqual(response["Content-Type"], "application/json") # Make sure the image is deleted self.assertFalse( UploadedImage.objects.filter(id=self.uploaded_image.id).exists() ) # Check JSON response_json = json.loads(response.content.decode()) self.assertTrue(response_json["success"])
This tests that a POST request to the delete view deletes the UploadedImage
13
37
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_delete_uploaded_image(self): # Send request response = self.client.post( reverse( "wagtailimages:delete_upload_multiple", args=(self.uploaded_image.id,) ) ) # Check response self.assertEqual(response.status_code, 200) self.assertEqual(response["Content-Type"], "application/json") # Make sure the image is deleted self.assertFalse( UploadedImage.objects.filter(id=self.uploaded_image.id).exists() ) # Check JSON response_json = json.loads(response.content.decode()) self.assertTrue(response_json["success"]) ``` ###Assistant : This tests that a POST request to the delete view deletes the UploadedImage
895
def matrix(self) -> np.ndarray: if not np.any(self._matrices[self._centering]): matrix = self._matrices["legacy"].copy() matrix[:, 2] -= self.pose.offset[self._centering] self._matrices[self._centering] = matrix logger.trace("original matrix: %s, new matrix: %s", # type: ignore self._matrices["legacy"], matrix) return self._matrices[self._centering]
:class:`numpy.ndarray`: The 3x2 transformation matrix for extracting and aligning the core face area out of the original frame, with no padding or sizing applied. The returned matrix is offset for the given :attr:`centering`.
33
30
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def matrix(self) -> np.ndarray: if not np.any(self._matrices[self._centering]): matrix = self._matrices["legacy"].copy() matrix[:, 2] -= self.pose.offset[self._centering] self._matrices[self._centering] = matrix logger.trace("original matrix: %s, new matrix: %s", # type: ignore self._matrices["legacy"], matrix) return self._matrices[self._centering] ``` ###Assistant : :class:`numpy.ndarray`: The 3x2 transformation matrix for extracting and aligning the core face area out of the original frame, with no padding or sizing applied. The returned matrix is offset for the given :attr:`centering`.
896
def read_docstub(filename): in_documentation = False capturing = False indent_detection = '' doc_stub = [] with open(filename, 'r') as t_module_data: for line in t_module_data: if in_documentation: # start capturing the stub until indentation returns if capturing and line.startswith(indent_detection): doc_stub.append(line) elif capturing and not line.startswith(indent_detection): break elif line.lstrip().startswith('short_description:'): capturing = True # Detect that the short_description continues on the next line if it's indented more # than short_description itself. indent_detection = ' ' * (len(line) - len(line.lstrip()) + 1) doc_stub.append(line) elif line.startswith('DOCUMENTATION') and ('=' in line or ':' in line): in_documentation = True short_description = r''.join(doc_stub).strip().rstrip('.') data = AnsibleLoader(short_description, file_name=filename).get_single_data() return data
Quickly find short_description using string methods instead of node parsing. This does not return a full set of documentation strings and is intended for operations like ansible-doc -l.
28
100
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def read_docstub(filename): in_documentation = False capturing = False indent_detection = '' doc_stub = [] with open(filename, 'r') as t_module_data: for line in t_module_data: if in_documentation: # start capturing the stub until indentation returns if capturing and line.startswith(indent_detection): doc_stub.append(line) elif capturing and not line.startswith(indent_detection): break elif line.lstrip().startswith('short_description:'): capturing = True # Detect that the short_description continues on the next line if it's indented more # than short_description itself. indent_detection = ' ' * (len(line) - len(line.lstrip()) + 1) doc_stub.append(line) elif line.startswith('DOCUMENTATION') and ('=' in line or ':' in line): in_documentation = True short_description = r''.join(doc_stub).strip().rstrip('.') data = AnsibleLoader(short_description, file_name=filename).get_single_data() return data ``` ###Assistant : Quickly find short_description using string methods instead of node parsing. This does not return a full set of documentation strings and is intended for operations like ansible-doc -l.
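A rough call-site sketch for the entry above; the import path is where this helper typically lives in the Ansible codebase, and the module path argument is purely illustrative.
```Python
# Assumption: read_docstub is importable from ansible.parsing.plugin_docs.
from ansible.parsing.plugin_docs import read_docstub

data = read_docstub("/path/to/ansible/modules/ping.py")  # placeholder path
print(data.get("short_description"))
```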
897
def test_02_train_predictor(self): query = f response = self.handler.native_query(query) self.assertTrue(response.type == RESPONSE_TYPE.OK) # def test_03_retrain_predictor(self): # query = f"RETRAIN {self.test_model_name_1}" # response = self.handler.native_query(query) # self.assertTrue(response.type == RESPONSE_TYPE.OK)
CREATE PREDICTOR {self.test_model_name_1} FROM {PG_HANDLER_NAME} (SELECT * FROM demo_data.home_rentals limit 50) PREDICT rental_price
13
27
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_02_train_predictor(self): query = f response = self.handler.native_query(query) self.assertTrue(response.type == RESPONSE_TYPE.OK) # def test_03_retrain_predictor(self): # query = f"RETRAIN {self.test_model_name_1}" # response = self.handler.native_query(query) # self.assertTrue(response.type == RESPONSE_TYPE.OK) ``` ###Assistant : CREATE PREDICTOR {self.test_model_name_1} FROM {PG_HANDLER_NAME} (SELECT * FROM demo_data.home_rentals limit 50) PREDICT rental_price
898
def get_dependencies(dsk, key=None, task=no_default, as_list=False): if key is not None: arg = dsk[key] elif task is not no_default: arg = task else: raise ValueError("Provide either key or task") return keys_in_tasks(dsk, [arg], as_list=as_list)
Get the immediate tasks on which this task depends Examples -------- >>> inc = lambda x: x + 1 >>> add = lambda x, y: x + y >>> dsk = {'x': 1, ... 'y': (inc, 'x'), ... 'z': (add, 'x', 'y'), ... 'w': (inc, 'z'), ... 'a': (add, (inc, 'x'), 1)} >>> get_dependencies(dsk, 'x') set() >>> get_dependencies(dsk, 'y') {'x'} >>> get_dependencies(dsk, 'z') # doctest: +SKIP {'x', 'y'} >>> get_dependencies(dsk, 'w') # Only direct dependencies {'z'} >>> get_dependencies(dsk, 'a') # Ignore non-keys {'x'} >>> get_dependencies(dsk, task=(inc, 'x')) # provide tasks directly {'x'}
92
32
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_dependencies(dsk, key=None, task=no_default, as_list=False): if key is not None: arg = dsk[key] elif task is not no_default: arg = task else: raise ValueError("Provide either key or task") return keys_in_tasks(dsk, [arg], as_list=as_list) ``` ###Assistant : Get the immediate tasks on which this task depends Examples -------- >>> inc = lambda x: x + 1 >>> add = lambda x, y: x + y >>> dsk = {'x': 1, ... 'y': (inc, 'x'), ... 'z': (add, 'x', 'y'), ... 'w': (inc, 'z'), ... 'a': (add, (inc, 'x'), 1)} >>> get_dependencies(dsk, 'x') set() >>> get_dependencies(dsk, 'y') {'x'} >>> get_dependencies(dsk, 'z') # doctest: +SKIP {'x', 'y'} >>> get_dependencies(dsk, 'w') # Only direct dependencies {'z'} >>> get_dependencies(dsk, 'a') # Ignore non-keys {'x'} >>> get_dependencies(dsk, task=(inc, 'x')) # provide tasks directly {'x'}
899
def test_dict_checkpoint_fs(self): checkpoint = self._prepare_dict_checkpoint() # Convert into fs checkpoint path = checkpoint.to_directory() self.assertIsInstance(path, str) # Create from path checkpoint = Checkpoint.from_directory(path) self.assertTrue(checkpoint._local_path) self._assert_dict_checkpoint(checkpoint)
Test conversion from dict to FS checkpoint and back.
9
24
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_dict_checkpoint_fs(self): checkpoint = self._prepare_dict_checkpoint() # Convert into fs checkpoint path = checkpoint.to_directory() self.assertIsInstance(path, str) # Create from path checkpoint = Checkpoint.from_directory(path) self.assertTrue(checkpoint._local_path) self._assert_dict_checkpoint(checkpoint) ``` ###Assistant : Test conversion from dict to FS checkpoint and back.
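A hedged sketch of the dict-to-directory round trip this test asserts; the ray.air.checkpoint import path is an assumption and has moved between Ray releases.
```Python
from ray.air.checkpoint import Checkpoint  # path may differ in other Ray versions

ckpt = Checkpoint.from_dict({"weights": [1, 2, 3]})
path = ckpt.to_directory()             # materialize the checkpoint on disk
restored = Checkpoint.from_directory(path)
print(restored.to_dict()["weights"])   # [1, 2, 3]
```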